hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
a0a85ea1e05b41fe47fe4717f334e9584ad55d4f.hip | // !!! This is a file automatically generated by hipify!!!
#include <cutf/hiprand.hpp>
#include <cutf/cublas.hpp>
#include <cutf/memory.hpp>
#include <rand_projection_base.hpp>
void mtk::rsvd_test::random_projection_tf32::gen_rand(const std::uint64_t seed) {
auto cugen = cutf::hiprand::get_curand_unique_ptr(HIPRAND_RNG_PSEUDO_PHILOX4_32_10);
CUTF_CHECK_ERROR(hiprandSetPseudoRandomGeneratorSeed(*cugen.get(), seed));
CUTF_CHECK_ERROR(hiprandSetStream(*cugen.get(), cuda_stream));
CUTF_CHECK_ERROR(cutf::hiprand::generate_normal(*cugen.get(), rand_matrix_ptr, get_max_src_n() * get_max_target_rank(), 0, 1));
}
void mtk::rsvd_test::random_projection_tf32::apply(
const std::size_t m, const std::size_t n, const std::size_t r,
float* const dst_ptr, const std::size_t ldd,
float* const src_ptr, const std::size_t lds
) {
cublasMath_t org_math_mode;
CUTF_CHECK_ERROR(cublasGetMathMode(cublas_handle, &org_math_mode));
CUTF_CHECK_ERROR(cublasSetMathMode(cublas_handle, CUBLAS_TF32_TENSOR_OP_MATH));
const float alpha = 1.0f, beta = 0.0f;
CUTF_CHECK_ERROR(cutf::cublas::gemm(
cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_T,
m, r, n,
&alpha,
src_ptr, lds,
rand_matrix_ptr, r,
&beta,
dst_ptr, ldd
));
CUTF_CHECK_ERROR(cublasSetMathMode(cublas_handle, org_math_mode));
}
void mtk::rsvd_test::random_projection_tf32::allocate_working_memory() {
rand_matrix_ptr = cutf::memory::malloc_async<float>(get_max_src_n() * get_max_target_rank(), cuda_stream);
}
void mtk::rsvd_test::random_projection_tf32::free_working_memory() {
cutf::memory::free_async(rand_matrix_ptr, cuda_stream);
}
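// --- Editor's illustrative usage sketch (not part of the original file) ---
// The call order below is inferred only from the member functions defined above;
// the constructor arguments and the sizes m, n, r are hypothetical placeholders.
//
//   mtk::rsvd_test::random_projection_tf32 proj(/* ... */);
//   proj.allocate_working_memory();               // rand matrix: max_src_n x max_target_rank floats
//   proj.gen_rand(1234);                          // PHILOX normal(0,1) fill on cuda_stream
//   proj.apply(m, n, r, dst_ptr, m, src_ptr, m);  // dst (m x r) = src (m x n) * rand^T, TF32 tensor-op GEMM
//   proj.free_working_memory();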
| a0a85ea1e05b41fe47fe4717f334e9584ad55d4f.cu | #include <cutf/curand.hpp>
#include <cutf/cublas.hpp>
#include <cutf/memory.hpp>
#include <rand_projection_base.hpp>
void mtk::rsvd_test::random_projection_tf32::gen_rand(const std::uint64_t seed) {
auto cugen = cutf::curand::get_curand_unique_ptr(CURAND_RNG_PSEUDO_PHILOX4_32_10);
CUTF_CHECK_ERROR(curandSetPseudoRandomGeneratorSeed(*cugen.get(), seed));
CUTF_CHECK_ERROR(curandSetStream(*cugen.get(), cuda_stream));
CUTF_CHECK_ERROR(cutf::curand::generate_normal(*cugen.get(), rand_matrix_ptr, get_max_src_n() * get_max_target_rank(), 0, 1));
}
void mtk::rsvd_test::random_projection_tf32::apply(
const std::size_t m, const std::size_t n, const std::size_t r,
float* const dst_ptr, const std::size_t ldd,
float* const src_ptr, const std::size_t lds
) {
cublasMath_t org_math_mode;
CUTF_CHECK_ERROR(cublasGetMathMode(cublas_handle, &org_math_mode));
CUTF_CHECK_ERROR(cublasSetMathMode(cublas_handle, CUBLAS_TF32_TENSOR_OP_MATH));
const float alpha = 1.0f, beta = 0.0f;
CUTF_CHECK_ERROR(cutf::cublas::gemm(
cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_T,
m, r, n,
&alpha,
src_ptr, lds,
rand_matrix_ptr, r,
&beta,
dst_ptr, ldd
));
CUTF_CHECK_ERROR(cublasSetMathMode(cublas_handle, org_math_mode));
}
void mtk::rsvd_test::random_projection_tf32::allocate_working_memory() {
rand_matrix_ptr = cutf::memory::malloc_async<float>(get_max_src_n() * get_max_target_rank(), cuda_stream);
}
void mtk::rsvd_test::random_projection_tf32::free_working_memory() {
cutf::memory::free_async(rand_matrix_ptr, cuda_stream);
}
|
a094658ff3c687df4a9446bcab923bbe6dfe4a8c.hip | // !!! This is a file automatically generated by hipify!!!
// start very simple cylindrical geom
#pragma warning( disable : 4267) // size_t int mismatch
#pragma warning( disable : 4244) // thrust::reduce int mismatch
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "helper_cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include "cx.h"
#include "timers.h"
#include "scanner.h"
//struct sm_part {
// uint key;
// float val;
//};
__host__ __device__ Lor key2lor(uint key)
{
Lor l;
l.c2 = key & 0x000001ff;
l.z2 = (key >> 9) & 0x0000007f;
l.c1 = (key >> 16) & 0x000001ff;
l.z1 = (key >> 25); // & 0x0000007f; // Not necessary for an unsigned shift?
return l;
}
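// Editor's sketch (not in the original source): the inverse packing, assuming the
// same field layout decoded above (c2 = bits 0-8, z2 = bits 9-15, c1 = bits 16-24,
// z1 = bits 25-31).
__host__ __device__ uint lor2key(const Lor &l)
{
return (((uint)l.z1) << 25) | ((((uint)l.c1) & 0x000001ff) << 16) |
((((uint)l.z2) & 0x0000007f) << 9) | (((uint)l.c2) & 0x000001ff);
}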
__host__ __device__ int cyc_diff(cint c1, cint c2) {
return c1 > c2 ? c1-c2 : c2-c1;
}
// indexing modulo Range, assumes i is within interval
template <int Range> __host__ __device__ int cyc_sub(cint i, cint step) {
return i >= step ? i - step : i - step + Range;
}
template <int Range> __host__ __device__ int cyc_dec(cint i) {
return i >= 1 ? i - 1 : Range - 1;
}
template <int Range> __host__ __device__ int cyc_add(cint i, cint step) {
return i + step < Range ? i + step : i + step - Range;
}
template <int Range> __host__ __device__ int cyc_inc(cint i) {
return i + 1 < Range ? i + 1 : 0;
}
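// Editor's sketch (not in the original source): a host-side sanity check showing how
// the cyclic helpers wrap, using Range = 400 (cryNum, per the kernel comments below).
inline bool cyc_sanity_check()
{
return cyc_add<400>(399, 1) == 0 &&  // 399 + 1 wraps to 0
cyc_sub<400>(0, 1) == 399 &&         // 0 - 1 wraps to 399
cyc_inc<400>(399) == 0 &&
cyc_dec<400>(0) == 399;
}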
__host__ __device__ int c2_to_dc2(cint c1, cint c2) {
//size_t cdc = cyc_sub<cryNum>(lor.c2,lor.c1) - cryDiffMin; // from phantom in fullsim
return cyc_sub<cryNum>(c2,c1)-cryDiffMin;
}
//NB this can be called with either z1 or (z2-z1) as argument
// steps in the other variable will then be adjacent in memory
// Using (z2-z1) as the argument turns out to be a bit faster.
__device__ int zdz_slice(int z)
{
return detZdZNum - (zNum-z)*(zNum-z+1)/2;
}
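// Editor's note (illustrative): assuming detZdZNum == zNum*(zNum+1)/2, the subtracted
// term (zNum-z)*(zNum-z+1)/2 counts the (z1,dz) pairs still to come, so
// zdz_slice(0) == 0 and each successive z starts where the previous, shrinking
// block of slices ended (triangular packing of the axial index pairs).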
// assumes threads = cryNum, i.e. 400, so that one thread block processes all phis for a fixed sm value
__global__ void forward_project(cr_Ptr<sm_part> sm, uint smstart, uint smend, cr_Ptr<uint> pet, cr_Ptr<float> vol,int ring, r_Ptr<float> K)
{
int phi = threadIdx.x;
uint smpos = smstart+blockIdx.x;
//uint count = 0;
while (smpos < smend) {
Lor tl = key2lor(sm[smpos].key);
tl.c1 = cyc_add<cryNum>(tl.c1, phi); // rotate by phi
tl.c2 = cyc_add<cryNum>(tl.c2, phi); // rotate by phi
if(tl.z1==0 && tl.z2==0 && tl.c2 <= tl.c1) {smpos += gridDim.x; continue; }// skip unused case
int dc = c2_to_dc2(tl.c1,tl.c2); // sm has actual c2 not delta
int tsum = tl.z1+tl.z2;
float val= sm[smpos].val;
uint lor_index = zdz_slice(tsum)*cryCdCNum + dc*cryNum + tl.c1;
uint vol_index = (ring*zNum + tl.z1)*cryNum + phi; // z1+zs1 here as voxel index
for (int zs1 = 0; zs1 < zNum-tsum; zs1++) { // zs1 is sliding posn of lh end of lor
if(pet[lor_index]>0){
float element = vol[vol_index] * val;
atomicAdd(&K[lor_index],element);
}
lor_index += cryCdCNum; // for zs1++
vol_index += cryNum; // for zs1++
//count++;
}
smpos += gridDim.x;
}
//if (threadIdx.x==0 && (blockIdx.x==gridDim.x-1 || blockIdx.x==0)) printf("r %2d bl %4d count %u\n",ring,blockIdx.x,count);
}
// assumes threads = cryNum, i.e. 400, so that one thread block processes all phis for a fixed sm value
__global__ void backward_project(cr_Ptr<sm_part> sm, uint smstart, uint smend, cr_Ptr<uint> pet, int ring, cr_Ptr<float> K, r_Ptr<float> M)
{
int phi = threadIdx.x;
uint smpos = smstart+blockIdx.x;
while (smpos < smend) {
Lor tl = key2lor(sm[smpos].key);
tl.c1 = cyc_add<cryNum>(tl.c1, phi); // rotate by phi
tl.c2 = cyc_add<cryNum>(tl.c2, phi); // rotate by phi
if(tl.z1==0 && tl.z2==0 && tl.c2 <= tl.c1) {smpos += gridDim.x; continue; }// skip unused case
int dc = c2_to_dc2(tl.c1,tl.c2); // sm has actual c2 not delta
int tsum = tl.z1+tl.z2;
float val= sm[smpos].val;
uint lor_index = zdz_slice(tsum)*cryCdCNum + dc*cryNum + tl.c1; // new july 6
uint vol_index = (ring*zNum + tl.z1)*cryNum + phi; // z1+zs1 here as voxel index
for (int zs1 = 0; zs1 < zNum-tsum; zs1++) { // zs1 is sliding posn of lh end of lor
if(pet[lor_index]>0){
float element = val * pet[lor_index] / K[lor_index]; // val added 27/06/19!!
atomicAdd(&M[vol_index],element);
}
lor_index += cryCdCNum; // for zs1++
vol_index += cryNum; // for zs1++
}
smpos += gridDim.x;
}
}
__global__ void rescale(r_Ptr<float> vol, cr_Ptr<float> M, cr_Ptr<float> norm)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
while(id < zNum*radNum*cryNum){
vol[id] *= M[id] / norm[id/cryNum];
id += blockDim.x*gridDim.x;
}
}
template <typename T> __global__ void clear_vector(r_Ptr<float> a,uint len)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
while(id < len){
a[id] = (T)0;
id += blockDim.x*gridDim.x;
}
}
int normalise_sm(thrustHvec<sm_part> &sm,thrustHvec<float> &norm,thrustHvec<uint> &smhits,thrustHvec<uint> &smstart)
{
// NB all dZ and dC cuts assumed to have already been made in readspot
uint norm_size = radNum*zNum;
// normalise allowing for voxel volume ~ Router^2 - Rinner^2
for(int r = 0;r<radNum;r++){
uint sm_start = smstart[r];
uint smnum = smhits[r+2];
for(uint k=sm_start;k<sm_start+smnum;k++){
Lor tl = key2lor(sm[k].key);
float val = sm[k].val;
for (int z = tl.z1; z < zNum - tl.z2; z++) {
norm[r*zNum+z] += val;
}
}
}
printf("normalization done for %d rings and %d slices\n",radNum,zNum);
for(uint i=0;i<norm_size;i++) norm[i] /= 1.0e+10; // assume 10^10 generations per voxel
cx::write_raw("norm_new.raw",norm.data(),norm_size);
//cx::write_raw("norm_recip.raw",norm.data(),norm_size);
return 0;
}
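// Editor's note (illustrative): for ring r the loop above adds, for each stored lor
// weight val, one contribution per allowed axial placement z in [z1, zNum-z2), so
// norm[r*zNum+z] accumulates the simulated hit count seen by voxel (r, z); dividing
// by 1e10 (the assumed 10^10 generated decays per voxel) turns it into a per-decay
// detection probability used later by the rescale kernel.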
int list_sm(thrustHvec<sm_part> &sm,thrustHvec<uint> &smhits,thrustHvec<uint> &smstart)
{
printf("list sm called\n");
for(int r=0;r<radNum;r++){
printf("list sm called r=%d\n",r);
char name[256];
sprintf(name,"smlist_r%3.3d.txt",r);
FILE * flog = fopen(name,"w");
uint sm_start = smstart[r];
uint smnum = smhits[r+2];
for(uint k=sm_start;k<sm_start+smnum;k++){
Lor tl = key2lor(sm[k].key);
float val = sm[k].val;
fprintf(flog,"smpos %6u lor (%2d %3d)-(%2d %3d) val %.0f\n",k,tl.z1,tl.c1,tl.z2,tl.c2,val);
}
fclose(flog);
}
return 0;
}
int main(int argc,char *argv[])
{
if(argc < 2){
printf("usage reco <pet file (phantom)> <result file> <sm file> <sm nhits file> <iterations>\n");
return 0;
}
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
//struct hipFuncAttributes fa;
//hipError_t cudaResult = hipSuccess;
//cudaResult = hipFuncGetAttributes(&fa,backward_project );
//printf("cacheModeCA %u\n constSizeBytes %u\n localSizeBytes %u\n maxDynamicSharedSizeBytes %u\n
// maxThreadsPerBlock %u\n numRegs %u\n preferredShmemCarveout %u\n",
// fa.cacheModeCA,fa.constSizeBytes,fa.localSizeBytes,fa.maxDynamicSharedSizeBytes,fa.maxThreadsPerBlock,fa.numRegs,fa.preferredShmemCarveout);
//return 0;
int blscale = 1024;
int thscale = 256;
int niter = 10; if(argc> 5) niter = atoi(argv[5]);
// set up system matrix
char name[256];
thrustHvec<uint> nhits(radNum+2);
if(cx::read_raw(argv[4],nhits.data(),radNum+2)){printf("bad read %s\n",argv[4]); return 1;}
if(nhits[0] != radNum){printf("bad nhits = %d, expected %d\n",nhits[0],radNum); return 1;}
thrustHvec<uint> nhits_start(radNum);
nhits_start[0] = 0;
for (int k = 1; k < radNum; k++) nhits_start[k] = nhits_start[k - 1] + nhits[1 + k];
//for (int k = 0; k < radNum; k++) printf("hits start %3d %8u end %8u\n",k, nhits_start[k],nhits_start[k]+nhits[k+2]);
// return 0;
uint sm_size = nhits[1];
uint lor_size = cryCdCNum*detZdZNum;
uint vol_size = cryNum*radNum*zNum;
uint norm_size = radNum*zNum;
uint zphi_size = cryNum*zNum;
printf("sm_size = %u, lor_size %u vol_size %u\n", sm_size,lor_size,vol_size);
thrustHvec<sm_part> sm(sm_size);
thrustDvec<sm_part> dev_sm(sm_size);
if (cx::read_raw(argv[3], sm.data(), sm_size)) { printf("bad read on sm_file %s\n", argv[3]); return 1; }
dev_sm = sm;
thrustHvec<uint> pet(lor_size);
thrustDvec<uint> dev_pet(lor_size);
if (cx::read_raw(argv[1], pet.data(), lor_size)) { printf("bad read on pet file %s\n", argv[1]); return 1; }
dev_pet = pet;
thrustHvec<float> K(lor_size); // working space for forward projection (voxels => lors)
thrustDvec<float> dev_K(lor_size);
thrustHvec<float> M(vol_size); // working space for backward projection (lors => voxels)
thrustDvec<float> dev_M(vol_size);
thrustHvec<float> vol(vol_size);
thrustDvec<float> dev_vol(vol_size);
thrustHvec<float> norm(norm_size); // voxel normalizations depend on ring and z
thrustDvec<float> dev_norm(norm_size);
cx::MYTimer ntim;
ntim.start();
normalise_sm(sm,norm,nhits,nhits_start);
cx::write_raw("smnorm.raw",norm.data(),norm_size);
dev_norm = norm;
ntim.add();
printf("Host normalize call %.3f ms\n",ntim.time());
//list_sm(sm,nhits,nhits_start);
//return 0;
double tot_activity = 0.0;
for (uint k = 0; k < lor_size; k++) tot_activity += pet[k];
//float mean_activity = tot_activity / vol_size;
//for (uint k = 0; k < vol_size; k++) vol[k] = mean_activity;
// new initialisation accounting for voxel volumes (makes little difference)
float roi_volume = cx::pi<float>*roiRadius*roiRadius;
float act_density = tot_activity/roi_volume;
//float act_pervox = tot_activity/vol_size;
float r1 = 0.0f;
float r2 = voxSize;
for(int r=0;r<radNum;r++){
float dr2 = r2*r2-r1*r1;
float voxvol = cx::pi<float>*dr2/cryNum;
for(uint k=0;k<zphi_size;k++) vol[r*zphi_size+k] = act_density*voxvol;
//for(int k=0;k<zphi_size;k++) vol[r*zphi_size+k] = act_pervox;
r1 = r2;
r2 += voxSize;
}
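// Editor's note (illustrative): each voxel is one of cryNum angular sectors of the
// annulus between radii r1 and r2 (unit z thickness assumed), so voxvol =
// pi*(r2^2 - r1^2)/cryNum and the uniform starting image holds act_density*voxvol
// expected counts per voxel, matching the "voxel volumes" comment above.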
dev_vol = vol;
printf("total activity %.0f, activity density %.0f\n",tot_activity,act_density);
//cx::write_raw("reco_start_vol.raw",vol.data(),vol_size);
int threads = cryNum;
int blocks = 512;
cx::MYTimer tim1;
cx::MYTimer tim2;
cx::MYTimer tim3;
cx::MYTimer all;
all.reset();
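// Editor's note (illustrative summary, not in the original source): each pass of the
// loop below is one MLEM-style update. With system matrix S, measured counts p
// (dev_pet) and current image v (dev_vol):
//   forward_project:  K  = S v            (expected counts per lor)
//   backward_project: M  = S^T (p / K)    (measured/expected ratio back-projected to voxels)
//   rescale:          v *= M / norm       (norm = per-(ring,z) sensitivity from normalise_sm)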
for(int iter = 0;iter< niter;iter++){
if(iter>0){
hipLaunchKernelGGL(( clear_vector<float>), dim3(blscale),dim3(thscale), 0, 0, dev_K.data().get(),lor_size);
hipLaunchKernelGGL(( clear_vector<float>), dim3(blscale),dim3(thscale), 0, 0, dev_M.data().get(),vol_size);
}
tim1.reset();
for (int r = 0; r < radNum; r++) {
hipLaunchKernelGGL(( forward_project), dim3(blocks), dim3(threads), 0, 0, dev_sm.data().get(), nhits_start[r], nhits_start[r]+nhits[r+2], dev_pet.data().get(), dev_vol.data().get(), r, dev_K.data().get() );
}
checkCudaErrors(hipDeviceSynchronize());
tim1.add();
tim2.reset();
for (int r = 0; r < radNum; r++) {
hipLaunchKernelGGL(( backward_project), dim3(blocks), dim3(threads), 0, 0, dev_sm.data().get(), nhits_start[r], nhits_start[r]+nhits[r+2], dev_pet.data().get(), r, dev_K.data().get(), dev_M.data().get() );
}
checkCudaErrors(hipDeviceSynchronize());
tim2.add();
tim3.reset();
hipLaunchKernelGGL(( rescale), dim3(blscale),dim3(thscale), 0, 0, dev_vol.data().get(), dev_M.data().get() , dev_norm.data().get());
checkCudaErrors(hipDeviceSynchronize());
tim3.add();
//vol = dev_vol;
//sprintf(name,"%s%3.3d.raw",argv[2],iter+1);
//cx::write_raw(name, vol.data(), vol_size);
checkCudaErrors(hipDeviceSynchronize());
all.add();
printf("iteration %3d times fwd %.3f bwd %.3f rsc %.3f all %.3f ms\n",iter+1,tim1.time(),tim2.time(),tim3.time(),all.time());
}
all.add();
printf("All time %.3f ms\n", all.time());
vol = dev_vol;
sprintf(name,"%s_%d_final.raw",argv[2],niter);
cx::write_raw(name, vol.data(), vol_size);
//sprintf(name,"Kbug%3.3d.raw",niter);
//K = dev_K;
//cx::write_raw(name, K.data(), lor_size);
//M = dev_M;
//sprintf(name,"Mbug%3.3d.raw",niter);
//cx::write_raw(name, M.data(), vol_size);
return 0;
}
| a094658ff3c687df4a9446bcab923bbe6dfe4a8c.cu | // start very simple cylindrical geom
#pragma warning( disable : 4267) // size_t int mismatch
#pragma warning( disable : 4244) // thrust::reduce int mismatch
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
#include "helper_cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include "cx.h"
#include "timers.h"
#include "scanner.h"
//struct sm_part {
// uint key;
// float val;
//};
__host__ __device__ Lor key2lor(uint key)
{
Lor l;
l.c2 = key & 0x000001ff;
l.z2 = (key >> 9) & 0x0000007f;
l.c1 = (key >> 16) & 0x000001ff;
l.z1 = (key >> 25); // & 0x0000007f; // Not necessary for an unsigned shift?
return l;
}
__host__ __device__ int cyc_diff(cint c1, cint c2) {
return c1 > c2 ? c1-c2 : c2-c1;
}
// indexing modulo Range, assumes i is within interval
template <int Range> __host__ __device__ int cyc_sub(cint i, cint step) {
return i >= step ? i - step : i - step + Range;
}
template <int Range> __host__ __device__ int cyc_dec(cint i) {
return i >= 1 ? i - 1 : Range - 1;
}
template <int Range> __host__ __device__ int cyc_add(cint i, cint step) {
return i + step < Range ? i + step : i + step - Range;
}
template <int Range> __host__ __device__ int cyc_inc(cint i) {
return i + 1 < Range ? i + 1 : 0;
}
__host__ __device__ int c2_to_dc2(cint c1, cint c2) {
//size_t cdc = cyc_sub<cryNum>(lor.c2,lor.c1) - cryDiffMin; // from phantom in fullsim
return cyc_sub<cryNum>(c2,c1)-cryDiffMin;
}
//NB this can be called with either z1 or (z2-z1) as argument
// steps in the other variable will then be adjacent in memory
// Using (z2-z1) as the argument turns out to be a bit faster.
__device__ int zdz_slice(int z)
{
return detZdZNum - (zNum-z)*(zNum-z+1)/2;
}
// assumes threads = cryNum, i.e. 400, so that one thread block processes all phis for a fixed sm value
__global__ void forward_project(cr_Ptr<sm_part> sm, uint smstart, uint smend, cr_Ptr<uint> pet, cr_Ptr<float> vol,int ring, r_Ptr<float> K)
{
int phi = threadIdx.x;
uint smpos = smstart+blockIdx.x;
//uint count = 0;
while (smpos < smend) {
Lor tl = key2lor(sm[smpos].key);
tl.c1 = cyc_add<cryNum>(tl.c1, phi); // rotate by phi
tl.c2 = cyc_add<cryNum>(tl.c2, phi); // rotate by phi
if(tl.z1==0 && tl.z2==0 && tl.c2 <= tl.c1) {smpos += gridDim.x; continue; }// skip unused case
int dc = c2_to_dc2(tl.c1,tl.c2); // sm has actual c2 not delta
int tsum = tl.z1+tl.z2;
float val= sm[smpos].val;
uint lor_index = zdz_slice(tsum)*cryCdCNum + dc*cryNum + tl.c1;
uint vol_index = (ring*zNum + tl.z1)*cryNum + phi; // z1+zs1 here as voxel index
for (int zs1 = 0; zs1 < zNum-tsum; zs1++) { // zs1 is sliding posn of lh end of lor
if(pet[lor_index]>0){
float element = vol[vol_index] * val;
atomicAdd(&K[lor_index],element);
}
lor_index += cryCdCNum; // for zs1++
vol_index += cryNum; // for zs1++
//count++;
}
smpos += gridDim.x;
}
//if (threadIdx.x==0 && (blockIdx.x==gridDim.x-1 || blockIdx.x==0)) printf("r %2d bl %4d count %u\n",ring,blockIdx.x,count);
}
// assumes threads = cryNum, i.e. 400, so that one thread block processes all phis for a fixed sm value
__global__ void backward_project(cr_Ptr<sm_part> sm, uint smstart, uint smend, cr_Ptr<uint> pet, int ring, cr_Ptr<float> K, r_Ptr<float> M)
{
int phi = threadIdx.x;
uint smpos = smstart+blockIdx.x;
while (smpos < smend) {
Lor tl = key2lor(sm[smpos].key);
tl.c1 = cyc_add<cryNum>(tl.c1, phi); // rotate by phi
tl.c2 = cyc_add<cryNum>(tl.c2, phi); // rotate by phi
if(tl.z1==0 && tl.z2==0 && tl.c2 <= tl.c1) {smpos += gridDim.x; continue; }// skip unused case
int dc = c2_to_dc2(tl.c1,tl.c2); // sm has actual c2 not delta
int tsum = tl.z1+tl.z2;
float val= sm[smpos].val;
uint lor_index = zdz_slice(tsum)*cryCdCNum + dc*cryNum + tl.c1; // new july 6
uint vol_index = (ring*zNum + tl.z1)*cryNum + phi; // z1+zs1 here as voxel index
for (int zs1 = 0; zs1 < zNum-tsum; zs1++) { // zs1 is sliding posn of lh end of lor
if(pet[lor_index]>0){
float element = val * pet[lor_index] / K[lor_index]; // val added 27/06/19!!
atomicAdd(&M[vol_index],element);
}
lor_index += cryCdCNum; // for zs1++
vol_index += cryNum; // for zs1++
}
smpos += gridDim.x;
}
}
__global__ void rescale(r_Ptr<float> vol, cr_Ptr<float> M, cr_Ptr<float> norm)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
while(id < zNum*radNum*cryNum){
vol[id] *= M[id] / norm[id/cryNum];
id += blockDim.x*gridDim.x;
}
}
template <typename T> __global__ void clear_vector(r_Ptr<float> a,uint len)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
while(id < len){
a[id] = (T)0;
id += blockDim.x*gridDim.x;
}
}
int normalise_sm(thrustHvec<sm_part> &sm,thrustHvec<float> &norm,thrustHvec<uint> &smhits,thrustHvec<uint> &smstart)
{
// NB all dZ and dC cuts assumed to have already been made in readspot
uint norm_size = radNum*zNum;
// normalise allowing for voxel volume ~ Router^2 - Rinner^2
for(int r = 0;r<radNum;r++){
uint sm_start = smstart[r];
uint smnum = smhits[r+2];
for(uint k=sm_start;k<sm_start+smnum;k++){
Lor tl = key2lor(sm[k].key);
float val = sm[k].val;
for (int z = tl.z1; z < zNum - tl.z2; z++) {
norm[r*zNum+z] += val;
}
}
}
printf("normalization done for %d rings and %d slices\n",radNum,zNum);
for(uint i=0;i<norm_size;i++) norm[i] /= 1.0e+10; // assume 10^10 generations per voxel
cx::write_raw("norm_new.raw",norm.data(),norm_size);
//cx::write_raw("norm_recip.raw",norm.data(),norm_size);
return 0;
}
int list_sm(thrustHvec<sm_part> &sm,thrustHvec<uint> &smhits,thrustHvec<uint> &smstart)
{
printf("list sm called\n");
for(int r=0;r<radNum;r++){
printf("list sm called r=%d\n",r);
char name[256];
sprintf(name,"smlist_r%3.3d.txt",r);
FILE * flog = fopen(name,"w");
uint sm_start = smstart[r];
uint smnum = smhits[r+2];
for(uint k=sm_start;k<sm_start+smnum;k++){
Lor tl = key2lor(sm[k].key);
float val = sm[k].val;
fprintf(flog,"smpos %6u lor (%2d %3d)-(%2d %3d) val %.0f\n",k,tl.z1,tl.c1,tl.z2,tl.c2,val);
}
fclose(flog);
}
return 0;
}
int main(int argc,char *argv[])
{
if(argc < 2){
printf("usage reco <pet file (phantom)> <result file> <sm file> <sm nhits file> <iterations>\n");
return 0;
}
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//struct cudaFuncAttributes fa;
//cudaError_t cudaResult = cudaSuccess;
//cudaResult = cudaFuncGetAttributes(&fa,backward_project );
//printf("cacheModeCA %u\n constSizeBytes %u\n localSizeBytes %u\n maxDynamicSharedSizeBytes %u\n
// maxThreadsPerBlock %u\n numRegs %u\n preferredShmemCarveout %u\n",
// fa.cacheModeCA,fa.constSizeBytes,fa.localSizeBytes,fa.maxDynamicSharedSizeBytes,fa.maxThreadsPerBlock,fa.numRegs,fa.preferredShmemCarveout);
//return 0;
int blscale = 1024;
int thscale = 256;
int niter = 10; if(argc> 5) niter = atoi(argv[5]);
// set up system matrix
char name[256];
thrustHvec<uint> nhits(radNum+2);
if(cx::read_raw(argv[4],nhits.data(),radNum+2)){printf("bad read %s\n",argv[4]); return 1;}
if(nhits[0] != radNum){printf("bad nhits = %d, expected %d\n",nhits[0],radNum); return 1;}
thrustHvec<uint> nhits_start(radNum);
nhits_start[0] = 0;
for (int k = 1; k < radNum; k++) nhits_start[k] = nhits_start[k - 1] + nhits[1 + k];
//for (int k = 0; k < radNum; k++) printf("hits start %3d %8u end %8u\n",k, nhits_start[k],nhits_start[k]+nhits[k+2]);
// return 0;
uint sm_size = nhits[1];
uint lor_size = cryCdCNum*detZdZNum;
uint vol_size = cryNum*radNum*zNum;
uint norm_size = radNum*zNum;
uint zphi_size = cryNum*zNum;
printf("sm_size = %u, lor_size %u vol_size %u\n", sm_size,lor_size,vol_size);
thrustHvec<sm_part> sm(sm_size);
thrustDvec<sm_part> dev_sm(sm_size);
if (cx::read_raw(argv[3], sm.data(), sm_size)) { printf("bad read on sm_file %s\n", argv[3]); return 1; }
dev_sm = sm;
thrustHvec<uint> pet(lor_size);
thrustDvec<uint> dev_pet(lor_size);
if (cx::read_raw(argv[1], pet.data(), lor_size)) { printf("bad read on pet file %s\n", argv[1]); return 1; }
dev_pet = pet;
thrustHvec<float> K(lor_size); // working space for forward projection (voxels => lors)
thrustDvec<float> dev_K(lor_size);
thrustHvec<float> M(vol_size); // working space for backward projection (lors => voxels)
thrustDvec<float> dev_M(vol_size);
thrustHvec<float> vol(vol_size);
thrustDvec<float> dev_vol(vol_size);
thrustHvec<float> norm(norm_size); // voxel normalizations depend on ring and z
thrustDvec<float> dev_norm(norm_size);
cx::MYTimer ntim;
ntim.start();
normalise_sm(sm,norm,nhits,nhits_start);
cx::write_raw("smnorm.raw",norm.data(),norm_size);
dev_norm = norm;
ntim.add();
printf("Host normalize call %.3f ms\n",ntim.time());
//list_sm(sm,nhits,nhits_start);
//return 0;
double tot_activity = 0.0;
for (uint k = 0; k < lor_size; k++) tot_activity += pet[k];
//float mean_activity = tot_activity / vol_size;
//for (uint k = 0; k < vol_size; k++) vol[k] = mean_activity;
// new initialisation accounting for voxel volumes (makes little difference)
float roi_volume = cx::pi<float>*roiRadius*roiRadius;
float act_density = tot_activity/roi_volume;
//float act_pervox = tot_activity/vol_size;
float r1 = 0.0f;
float r2 = voxSize;
for(int r=0;r<radNum;r++){
float dr2 = r2*r2-r1*r1;
float voxvol = cx::pi<float>*dr2/cryNum;
for(uint k=0;k<zphi_size;k++) vol[r*zphi_size+k] = act_density*voxvol;
//for(int k=0;k<zphi_size;k++) vol[r*zphi_size+k] = act_pervox;
r1 = r2;
r2 += voxSize;
}
dev_vol = vol;
printf("total activity %.0f, activity density %.0f\n",tot_activity,act_density);
//cx::write_raw("reco_start_vol.raw",vol.data(),vol_size);
int threads = cryNum;
int blocks = 512;
cx::MYTimer tim1;
cx::MYTimer tim2;
cx::MYTimer tim3;
cx::MYTimer all;
all.reset();
for(int iter = 0;iter< niter;iter++){
if(iter>0){
clear_vector<float><<<blscale,thscale>>>(dev_K.data().get(),lor_size);
clear_vector<float><<<blscale,thscale>>>(dev_M.data().get(),vol_size);
}
tim1.reset();
for (int r = 0; r < radNum; r++) {
forward_project<<<blocks, threads>>>(dev_sm.data().get(), nhits_start[r], nhits_start[r]+nhits[r+2], dev_pet.data().get(), dev_vol.data().get(), r, dev_K.data().get() );
}
checkCudaErrors(cudaDeviceSynchronize());
tim1.add();
tim2.reset();
for (int r = 0; r < radNum; r++) {
backward_project<<<blocks, threads>>>(dev_sm.data().get(), nhits_start[r], nhits_start[r]+nhits[r+2], dev_pet.data().get(), r, dev_K.data().get(), dev_M.data().get() );
}
checkCudaErrors(cudaDeviceSynchronize());
tim2.add();
tim3.reset();
rescale<<<blscale,thscale>>>(dev_vol.data().get(), dev_M.data().get() , dev_norm.data().get());
checkCudaErrors(cudaDeviceSynchronize());
tim3.add();
//vol = dev_vol;
//sprintf(name,"%s%3.3d.raw",argv[2],iter+1);
//cx::write_raw(name, vol.data(), vol_size);
checkCudaErrors(cudaDeviceSynchronize());
all.add();
printf("iteration %3d times fwd %.3f bwd %.3f rsc %.3f all %.3f ms\n",iter+1,tim1.time(),tim2.time(),tim3.time(),all.time());
}
all.add();
printf("All time %.3f ms\n", all.time());
vol = dev_vol;
sprintf(name,"%s_%d_final.raw",argv[2],niter);
cx::write_raw(name, vol.data(), vol_size);
//sprintf(name,"Kbug%3.3d.raw",niter);
//K = dev_K;
//cx::write_raw(name, K.data(), lor_size);
//M = dev_M;
//sprintf(name,"Mbug%3.3d.raw",niter);
//cx::write_raw(name, M.data(), vol_size);
return 0;
}
|
36a949a05736bb5c3fcbf91f90c88772739efbb1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2018-2021 Kyle Berney
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/binary-search.cuh"
int main(int argc, char *argv[]) {
if (argc != 3) {
fprintf(stderr, "Usage: %s <number of queries> <number of CPU threads>\n", argv[0]);
exit(1);
}
uint64_t q = atol(argv[1]);
if (q <= 0) {
fprintf(stderr, "Number of queries must be a positive integer\n");
exit(1);
}
int p = atoi(argv[2]);
omp_set_num_threads(p);
double time[ITERS];
uint64_t n[13] = {
4194303,
8388607,
10000000,
16777215,
33554431,
67108863,
100000000,
134217727,
268435455,
536870911,
1000000000,
1073741823,
2147483647
};
for (uint32_t i = 0; i < 13; ++i) {
uint32_t *A = (uint32_t *)malloc(n[i] * sizeof(uint32_t));
uint32_t *dev_A;
hipMalloc(&dev_A, n[i] * sizeof(uint32_t));
initSortedList<uint32_t>(A, n[i]);
hipMemcpy(dev_A, A, n[i] * sizeof(uint32_t), hipMemcpyHostToDevice);
//Querying
for (uint32_t j = 0; j < ITERS; ++j) {
time[j] = timeQuery<uint32_t>(A, dev_A, n[i], q);
}
printQueryTimings(n[i], q, time);
free(A);
hipFree(dev_A);
}
return 0;
} | 36a949a05736bb5c3fcbf91f90c88772739efbb1.cu | /*
* Copyright 2018-2021 Kyle Berney
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../src/binary-search.cuh"
int main(int argc, char *argv[]) {
if (argc != 3) {
fprintf(stderr, "Usage: %s <number of queries> <number of CPU threads>\n", argv[0]);
exit(1);
}
uint64_t q = atol(argv[1]);
if (q <= 0) {
fprintf(stderr, "Number of queries must be a positive integer\n");
exit(1);
}
int p = atoi(argv[2]);
omp_set_num_threads(p);
double time[ITERS];
uint64_t n[13] = {
4194303,
8388607,
10000000,
16777215,
33554431,
67108863,
100000000,
134217727,
268435455,
536870911,
1000000000,
1073741823,
2147483647
};
for (uint32_t i = 0; i < 13; ++i) {
uint32_t *A = (uint32_t *)malloc(n[i] * sizeof(uint32_t));
uint32_t *dev_A;
cudaMalloc(&dev_A, n[i] * sizeof(uint32_t));
initSortedList<uint32_t>(A, n[i]);
cudaMemcpy(dev_A, A, n[i] * sizeof(uint32_t), cudaMemcpyHostToDevice);
//Querying
for (uint32_t j = 0; j < ITERS; ++j) {
time[j] = timeQuery<uint32_t>(A, dev_A, n[i], q);
}
printQueryTimings(n[i], q, time);
free(A);
cudaFree(dev_A);
}
return 0;
} |
705525f98f4b8adf3141ac84c3ffd227981607b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to [email protected]
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------
*/
#include "npb-C.h"
/* global variables */
#include "global.h"
#ifndef __O2G_HEADER__
#define __O2G_HEADER__
/******************************************/
/* Added codes for OpenMP2GPU translation */
/******************************************/
#include <cutil.h>
#include <math.h>
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
static int gpuNumThreads = BLOCK_SIZE;
static int gpuNumBlocks;
static int gpuNumBlocks1;
static int gpuNumBlocks2;
static int totalNumThreads;
unsigned int gpuGmemSize = 0;
unsigned int gpuSmemSize = 0;
static unsigned int gpuBytes = 0;
#endif
/* End of __O2G_HEADER__ */
int * gpu__fftblock;
int * gpu__fftblockpad;
double * gpu__u_imag;
double * gpu__u_real;
double * gpu__u1_imag__main;
double * gpu__u1_real__main;
double * gpu__u0_imag__main;
double * gpu__u0_real__main;
double * gpu__u2_imag__main;
double * gpu__u2_real__main;
int * gpu__xend;
int * gpu__xstart;
int * gpu__yend;
int * gpu__ystart;
int * gpu__zend;
int * gpu__zstart;
int * gpu__dims;
size_t pitch__dims;
int * gpu__indexmap__main;
double * gpu__ex;
static double tmp__compute_initial_conditions[(((512*2)*512)+1)];
static double yy0_real[512][18];
static double yy0_imag[512][18];
static double yy1_real[512][18];
static double yy1_imag[512][18];
#pragma omp threadprivate(yy0_real)
#pragma omp threadprivate(yy0_imag)
#pragma omp threadprivate(yy1_real)
#pragma omp threadprivate(yy1_imag)
/* function declarations */
static void evolve_cloned0(double u0_real[256][256][512], double u0_imag[256][256][512], double u1_real[256][256][512], double u1_imag[256][256][512], int t, int indexmap[256][256][512], int d[3]);
static void compute_initial_conditions(double u0_real[256][256][512], double u0_imag[256][256][512], int d[3]);
static void ipow46(double a, int exponent, double * result);
static void setup(void );
static void compute_indexmap(int indexmap[256][256][512], int d[3]);
static void compute_indexmap_clnd1(int indexmap[256][256][512], int d[3]);
static void print_timers(void );
static void fft(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void fft_clnd2_cloned0(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void fft_clnd1(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void cffts1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void fft_init(int n);
__device__ static void dev_cfftz(int is, int m, int n, double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int * fftblock, int * fftblockpad, double u_imag[512], double u_real[512], int _gtid);
__device__ static void dev_fftz2(int is, int l, int m, int n, int ny, int ny1, double u_real[512], double u_imag[512], double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int _gtid);
static int ilog2(int n);
static void checksum(int i, double u1_real[256][256][512], double u1_imag[256][256][512], int d[3]);
static void verify(int d1, int d2, int d3, int nt, int * verified, char * cclass);
/* */
/* E L A P S E D _ T I M E */
/* */
double elapsed_time(void )
{
double t;
wtime(( & t));
return t;
}
double start[64];
double elapsed[64];
/* */
/* T I M E R _ C L E A R */
/* */
void timer_clear(int n)
{
elapsed[n]=0.0;
return ;
}
/* */
/* T I M E R _ S T A R T */
/* */
void timer_start(int n)
{
start[n]=elapsed_time();
return ;
}
/* */
/* T I M E R _ S T O P */
/* */
void timer_stop(int n)
{
double t;
double now;
now=elapsed_time();
t=(now-start[n]);
elapsed[n]+=t;
return ;
}
/* */
/* T I M E R _ R E A D */
/* */
double timer_read(int n)
{
double _ret_val_0;
_ret_val_0=elapsed[n];
return _ret_val_0;
}
static void c_print_results(char * name, char cccccclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char * optype, int passed_verification, char * npbversion, char * compiletime, char * cc, char * clink, char * c_lib, char * c_inc, char * cflags, char * clinkflags, char * rand)
{
printf("\n\n %s Benchmark Completed\n", name);
printf(" Class = %c\n", cccccclass);
/* as in IS */
if (((n2==0)&&(n3==0)))
{
printf(" Size = %12d\n", n1);
}
else
{
printf(" Size = %3dx%3dx%3d\n", n1, n2, n3);
}
printf(" Iterations = %12d\n", niter);
printf(" Threads = %12d\n", nthreads);
printf(" Time in seconds = %12.2f\n", t);
printf(" Mop/s total = %12.2f\n", mops);
printf(" Operation type = %24s\n", optype);
if (passed_verification)
{
printf(" Verification = SUCCESSFUL\n");
}
else
{
printf(" Verification = UNSUCCESSFUL\n");
}
printf(" Version = %12s\n", npbversion);
printf(" Compile date = %12s\n", compiletime);
printf("\n Compile options:\n");
printf(" CC = %s\n", cc);
printf(" CLINK = %s\n", clink);
printf(" C_LIB = %s\n", c_lib);
printf(" C_INC = %s\n", c_inc);
printf(" CFLAGS = %s\n", cflags);
printf(" CLINKFLAGS = %s\n", clinkflags);
printf(" RAND = %s\n", rand);
/*
printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: [email protected]\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );
*/
return ;
}
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
double randlc(double * x, double a)
{
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
/*
c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------
*/
double t1;
double t2;
double t3;
double t4;
double a1;
double a2;
double x1;
double x2;
double z;
/*
c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 A1 + A2.
c---------------------------------------------------------------------
*/
double _ret_val_0;
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*a);
a1=((int)t1);
a2=(a-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*a1));
/*
c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*( * x));
x1=((int)t1);
x2=(( * x)-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*x1));
t1=((a1*x2)+(a2*x1));
t2=((int)(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*t1));
z=(t1-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*t2));
t3=((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*z)+(a2*x2));
t4=((int)((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*t3));
( * x)=(t3-((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0))*t4));
_ret_val_0=((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*( * x));
return _ret_val_0;
}
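/* Editor's sketch (not part of the benchmark source): the long products of 0.5 and
 * 2.0 above simply spell out the constants 2^-23, 2^23, 2^-46 and 2^46. An
 * equivalent, easier-to-read form of the same update, using ldexp from <math.h>
 * (already included above), would be this unused helper:
 */
static double randlc_sketch(double *x, double a)
{
const double r23 = ldexp(1.0, -23), t23 = ldexp(1.0, 23);
const double r46 = ldexp(1.0, -46), t46 = ldexp(1.0, 46);
double a1 = (int)(r23 * a);
double a2 = a - t23 * a1;
double x1 = (int)(r23 * (*x));
double x2 = (*x) - t23 * x1;
double t1 = a1 * x2 + a2 * x1;
double t2 = (int)(r23 * t1);
double z = t1 - t23 * t2;
double t3 = t23 * z + a2 * x2;
double t4 = (int)(r46 * t3);
(*x) = t3 - t46 * t4;
return r46 * (*x);
}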
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
void vranlc(int n, double * x_seed, double a, double y[])
{
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
/*
c---------------------------------------------------------------------
c
c This routine generates N uniform pseudorandom double precision numbers in
c the range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The N results are placed in Y and are normalized
c to be between 0 and 1. X is updated to contain the new seed, so that
c subsequent calls to VRANLC using the same arguments will generate a
c continuous sequence. If N is zero, only initialization is performed, and
c the variables X, A and Y are ignored.
c
c This routine is the standard version designed for scalar or RISC systems.
c However, it should produce the same results on any single processor
c computer with at least 48 mantissa bits in double precision floating point
c data. On 64 bit systems, double precision should be disabled.
c
c---------------------------------------------------------------------
*/
int i;
double x;
double t1;
double t2;
double t3;
double t4;
double a1;
double a2;
double x1;
double x2;
double z;
/*
c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 A1 + A2.
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*a);
a1=((int)t1);
a2=(a-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*a1));
x=( * x_seed);
/*
c---------------------------------------------------------------------
c Generate N results. This loop is not vectorizable.
c---------------------------------------------------------------------
*/
#pragma loop name vranlc#0
for (i=1; i<=n; i ++ )
{
/*
c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*x);
x1=((int)t1);
x2=(x-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*x1));
t1=((a1*x2)+(a2*x1));
t2=((int)(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*t1));
z=(t1-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*t2));
t3=((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*z)+(a2*x2));
t4=((int)((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*t3));
x=(t3-((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0))*t4));
y[i]=((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*x);
}
( * x_seed)=x;
return ;
}
int main(int argc, char * * argv)
{
/*
c-------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
/*
------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accommodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the initial (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------
*/
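/*
--------------------------------------------------------------------
c Editor's note (illustrative summary, not in the original source): per benchmark
c iteration the loop in main below computes u1 = u0 .* ex[t*(i^2+j^2+k^2)] via
c evolve (indexmap supplies i^2+j^2+k^2), then u2 = inverse 3-D FFT of u1 (the
c dir = -1 call), and finally a checksum of u2; u0 itself is the forward FFT of
c the random initial condition.
c-------------------------------------------------------------------
*/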
/*
--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------
*/
static double u0_real[256][256][512];
static double u0_imag[256][256][512];
static double u1_real[256][256][512];
static double u1_imag[256][256][512];
static double u2_real[256][256][512];
static double u2_imag[256][256][512];
static int indexmap[256][256][512];
int iter;
int nthreads = 1;
double total_time;
double mflops;
int verified;
char cclass;
/*
--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------
*/
int _ret_val_0 = 0;
////////////////////////////////
// CUDA Device Initialization //
////////////////////////////////
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "cutil error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipDeviceProp_t deviceProp;
CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev));
fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name);
CUDA_SAFE_CALL(hipSetDevice(dev));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__fftblock)), gpuBytes));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__fftblockpad)), gpuBytes));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u_imag)), gpuBytes));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u_real)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u1_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u1_real__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u0_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u0_real__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u2_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__u2_real__main)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__xend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__xstart)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__ystart)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__zend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__zstart)), gpuBytes));
CUDA_SAFE_CALL(hipMallocPitch(((void * * )( & gpu__dims)), ( & pitch__dims), (3*sizeof (int)), 3));
gpuBytes=(((256*256)*512)*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__indexmap__main)), gpuBytes));
gpuBytes=(((20*((((512*512)/4)+((256*256)/4))+((256*256)/4)))+1)*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__ex)), gpuBytes));
#pragma loop name main#0
for (i=0; i<7; i ++ )
{
timer_clear(i);
}
setup();
/* #pragma omp parallel */
{
compute_indexmap(indexmap, dims[2]);
/* #pragma omp single */
{
compute_initial_conditions(u1_real, u1_imag, dims[0]);
fft_init(dims[0][0]);
}
fft(1, u1_real, u1_imag, u0_real, u0_imag);
}
/* end parallel */
/*
--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------
*/
#pragma loop name main#1
for (i=0; i<7; i ++ )
{
timer_clear(i);
}
timer_start(0);
if ((0==1))
{
timer_start(1);
}
/* #pragma omp parallel private(iter) firstprivate(niter) */
{
compute_indexmap_clnd1(indexmap, dims[2]);
/* #pragma omp single */
{
compute_initial_conditions(u1_real, u1_imag, dims[0]);
fft_init(dims[0][0]);
}
if ((0==1))
{
/* #pragma omp master */
timer_stop(1);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(2);
}
fft_clnd1(1, u1_real, u1_imag, u0_real, u0_imag);
if ((0==1))
{
/* #pragma omp master */
timer_stop(2);
}
#pragma loop name main#2
for (iter=1; iter<=niter; iter ++ )
{
if ((0==1))
{
/* #pragma omp master */
timer_start(3);
}
evolve_cloned0(u0_real, u0_imag, u1_real, u1_imag, iter, indexmap, dims[0]);
if ((0==1))
{
/* #pragma omp master */
timer_stop(3);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(2);
}
fft_clnd2_cloned0(( - 1), u1_real, u1_imag, u2_real, u2_imag);
if ((0==1))
{
/* #pragma omp master */
timer_stop(2);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(4);
}
checksum(iter, u2_real, u2_imag, dims[0]);
if ((0==1))
{
/* #pragma omp master */
timer_stop(4);
}
}
/* #pragma omp single */
verify(512, 256, 256, niter, ( & verified), ( & cclass));
}
/* end parallel */
timer_stop(0);
total_time=timer_read(0);
if ((total_time!=0.0))
{
mflops=(((1.0E-6*((double)33554432))*((14.8157+(7.19641*log(((double)33554432))))+((5.23518+(7.21113*log(((double)33554432))))*niter)))/total_time);
}
else
{
mflops=0.0;
}
c_print_results("FT", cclass, 512, 256, 256, niter, nthreads, total_time, mflops, " floating point", verified, "2.3", "20 Feb 2012", "gcc", "gcc", "-lm", "-I../common", "-O3 ", "(none)", "randdp");
if ((0==1))
{
print_timers();
}
printf("/***********************/ \n/* Input Configuration */ \n/***********************/ \n");
printf("====> GPU Block Size: 1024 \n");
printf("/**********************/ \n/* Used Optimizations */ \n/**********************/ \n");
printf("====> MallocPitch Opt is used.\n");
printf("====> MatrixTranspose Opt is used.\n");
printf("====> ParallelLoopSwap Opt is used.\n");
printf("====> LoopCollapse Opt is used.\n");
printf("====> Unrolling-on-reduction Opt is used.\n");
printf("====> Allocate GPU variables as global ones.\n");
printf("====> Optimize globally allocated GPU variables .\n");
printf("====> CPU-GPU Mem Transfer Opt Level: 4\n");
printf("====> Cuda Malloc Opt Level: 1\n");
printf("====> Assume that all loops have non-zero iterations.\n");
printf("====> Cache shared scalar variables onto GPU registers.\n");
printf("====> Cache shared array elements onto GPU registers.\n");
printf("====> Cache private array variables onto GPU shared memory.\n");
printf("====> local array reduction variable configuration = 1\n");
CUDA_SAFE_CALL(hipFree(gpu__fftblock));
CUDA_SAFE_CALL(hipFree(gpu__fftblockpad));
CUDA_SAFE_CALL(hipFree(gpu__u_imag));
CUDA_SAFE_CALL(hipFree(gpu__u_real));
CUDA_SAFE_CALL(hipFree(gpu__u1_imag__main));
CUDA_SAFE_CALL(hipFree(gpu__u1_real__main));
CUDA_SAFE_CALL(hipFree(gpu__u0_imag__main));
CUDA_SAFE_CALL(hipFree(gpu__u0_real__main));
CUDA_SAFE_CALL(hipFree(gpu__u2_imag__main));
CUDA_SAFE_CALL(hipFree(gpu__u2_real__main));
CUDA_SAFE_CALL(hipFree(gpu__xend));
CUDA_SAFE_CALL(hipFree(gpu__xstart));
CUDA_SAFE_CALL(hipFree(gpu__yend));
CUDA_SAFE_CALL(hipFree(gpu__ystart));
CUDA_SAFE_CALL(hipFree(gpu__zend));
CUDA_SAFE_CALL(hipFree(gpu__zstart));
CUDA_SAFE_CALL(hipFree(gpu__dims));
CUDA_SAFE_CALL(hipFree(gpu__indexmap__main));
CUDA_SAFE_CALL(hipFree(gpu__ex));
fflush(stdout);
fflush(stderr);
return _ret_val_0;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void evolve_cloned0_kernel0(int * d, double * ex, int indexmap[256][256][512], int * t, double u0_imag[256][256][512], double u0_real[256][256][512], double u1_imag[256][256][512], double u1_real[256][256][512])
{
double ex_0;
int t_0;
int i;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
i=_gtid;
t_0=( * t);
if (i<d[0])
{
#pragma loop name evolve#0#0
for (j=0; j<d[1]; j ++ )
{
#pragma loop name evolve#0#0#0
for (k=0; k<d[2]; k ++ )
{
ex_0=ex[(t_0*indexmap[k][j][i])];
u1_real[k][j][i]=(u0_real[k][j][i]*ex_0);
u1_imag[k][j][i]=(u0_imag[k][j][i]*ex_0);
}
}
}
}
static void evolve_cloned0(double u0_real[256][256][512], double u0_imag[256][256][512], double u1_real[256][256][512], double u1_imag[256][256][512], int t, int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space
c-------------------------------------------------------------------
*/
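/*
c Note (a sketch inferred from compute_indexmap below): indexmap[k][j][i]
c holds kbar^2 + jbar^2 + ibar^2 and ex[m] = exp(ap)^m with ap = -4.0e-6*pi^2,
c so this routine effectively computes
c u1 = u0 * exp(-4.0e-6 * pi^2 * t * (kbar^2 + jbar^2 + ibar^2))
c for every Fourier mode, i.e. t time steps of the evolution factor.
*/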
int * gpu__d;
int * gpu__t;
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(((20*((((512*512)/4)+((256*256)/4))+((256*256)/4)))+1)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__ex, ex, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__t)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__t, ( & t), gpuBytes, hipMemcpyHostToDevice));
#pragma omp parallel for shared(d, ex, indexmap, t, u0_imag, u0_real, u1_imag, u1_real) private(i, j, k)
#pragma cuda gpurun noc2gmemtr(indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun nocudamalloc(indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun nocudafree(ex, indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun multisrccg(ex)
#pragma cuda gpurun nog2cmemtr(d, ex, indexmap, t, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda ainfo kernelid(0) procname(evolve_cloned0)
#pragma cuda gpurun registerRO(ex[(t*indexmap[k][j][i])], t)
#pragma cuda gpurun cudafree(d, t)
hipLaunchKernelGGL(( evolve_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__ex, ((int (*)[256][512])gpu__indexmap__main), gpu__t, ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__t));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void compute_initial_conditions(double u0_real[256][256][512], double u0_imag[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------
*/
int k;
double x0;
double start;
double an;
double dummy;
int i;
int j;
int t;
start=3.14159265E8;
/*
--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------
*/
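/*
c Note (sketch): ipow46 forms an = a^((zstart-1)*2*NX*NY + (ystart-1)*2*NX)
c mod 2^46, so multiplying the seed by an (via randlc) skips over all random
c numbers consumed by earlier planes; the second ipow46 gives the per-plane
c stride a^(2*NX*NY) used to advance "start" once per z plane in the loop below.
*/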
ipow46(1.220703125E9, (((((zstart[0]-1)*2)*512)*256)+(((ystart[0]-1)*2)*512)), ( & an));
dummy=randlc(( & start), an);
ipow46(1.220703125E9, ((2*512)*256), ( & an));
/*
--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------
*/
#pragma loop name compute_initial_conditions#0
for (k=0; k<dims[0][2]; k ++ )
{
x0=start;
vranlc(((2*512)*dims[0][1]), ( & x0), 1.220703125E9, tmp__compute_initial_conditions);
t=1;
#pragma loop name compute_initial_conditions#0#0
for (j=0; j<dims[0][1]; j ++ )
{
#pragma loop name compute_initial_conditions#0#0#0
for (i=0; i<512; i ++ )
{
u0_real[k][j][i]=tmp__compute_initial_conditions[(t ++ )];
u0_imag[k][j][i]=tmp__compute_initial_conditions[(t ++ )];
}
}
if ((k!=dims[0][2]))
{
dummy=randlc(( & start), an);
}
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void ipow46(double a, int exponent, double * result)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------
*/
double dummy;
double q;
double r;
int n;
int n2;
/*
--------------------------------------------------------------------
c Use
c a^n = a^(n2)*a^(n/2) if n even else
c a^n = a*a^(n-1) if n odd
c-------------------------------------------------------------------
*/
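/*
c Note (sketch of the invariant): with randlc(&x, y) acting as x = x*y mod 2^46
c (as in the NPB random-number utilities), the loop below preserves
c r * q^n == a^exponent (mod 2^46): it squares q and halves n while n is even,
c and folds one factor of q into r when n is odd; the final randlc(&r, q)
c absorbs the last remaining factor so that r = a^exponent on return.
*/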
( * result)=1;
if ((exponent==0))
{
return ;
}
q=a;
r=1;
n=exponent;
while (n>1)
{
n2=(n/2);
if (((n2*2)==n))
{
dummy=randlc(( & q), q);
n=n2;
}
else
{
dummy=randlc(( & r), q);
n=(n-1);
}
}
dummy=randlc(( & r), q);
( * result)=r;
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void setup(void )
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"" - FT Benchmark\n\n");
niter=20;
printf(" Size : %3dx%3dx%3d\n", 512, 256, 256);
printf(" Iterations : %7d\n", niter);
/*
1004 format(' Number of processes : ', i7)
1005 format(' Processor array : ', i3, 'x', i3)
1006 format(' WARNING: compiled for ', i5, ' processes. ',
> ' Will not verify. ')
*/
#pragma loop name setup#0
for (i=0; i<3; i ++ )
{
dims[i][0]=512;
dims[i][1]=256;
dims[i][2]=256;
}
#pragma loop name setup#1
for (i=0; i<3; i ++ )
{
xstart[i]=1;
xend[i]=512;
ystart[i]=1;
yend[i]=256;
zstart[i]=1;
zend[i]=256;
}
/*
--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------
*/
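/*
c Note: with the defaults below each pass of cffts1/2/3 moves fftblock = 16
c pencils into the per-thread scratch arrays, whose leading dimension is
c padded to fftblockpad = 18 (a non-power-of-two stride), presumably to avoid
c conflicts when the scratch is held in cached or shared memory.
*/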
fftblock=16;
fftblockpad=18;
if ((fftblock!=16))
{
fftblockpad=(fftblock+3);
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void compute_indexmap_kernel0(int * dims, size_t pitch__dims, int indexmap[256][256][512], int * xstart_i, int * ystart_i, int * zstart_i)
{
int i;
int ii;
int ii2;
int ij2;
int j;
int jj;
int k;
int kk;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
i=_gtid;
if (i<( * (((int * )(((char * )dims)+(2*pitch__dims)))+0)))
{
#pragma loop name compute_indexmap#0#0
for (j=0; j<( * (((int * )(((char * )dims)+(2*pitch__dims)))+1)); j ++ )
{
#pragma loop name compute_indexmap#0#0#0
for (k=0; k<( * (((int * )(((char * )dims)+(2*pitch__dims)))+2)); k ++ )
{
ii=((((((i+1)+( * xstart_i))-2)+(512/2))%512)-(512/2));
ii2=(ii*ii);
jj=((((((j+1)+( * ystart_i))-2)+(256/2))%256)-(256/2));
ij2=((jj*jj)+ii2);
kk=((((((k+1)+( * zstart_i))-2)+(256/2))%256)-(256/2));
indexmap[k][j][i]=((kk*kk)+ij2);
}
}
}
}
static void compute_indexmap(int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------
*/
int i;
double ap;
int xstart_i;
int ystart_i;
int zstart_i;
/*
--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n2, n) - n/2
c-------------------------------------------------------------------
*/
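/*
c Worked example: for n = 8 and Fortran index i = 6 the formula gives
c mod(6-1+4, 8) - 4 = (9 mod 8) - 4 = -3, matching the target table above.
*/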
int * gpu__xstart_i;
int * gpu__ystart_i;
int * gpu__zstart_i;
xstart_i=xstart[2];
ystart_i=ystart[2];
zstart_i=zstart[2];
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)dims[2][0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(hipMemcpy2D(gpu__dims, pitch__dims, dims, (3*sizeof (int)), (3*sizeof (int)), 3, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__xstart_i)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__xstart_i, ( & xstart_i), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__ystart_i)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__ystart_i, ( & ystart_i), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__zstart_i)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__zstart_i, ( & zstart_i), gpuBytes, hipMemcpyHostToDevice));
#pragma omp parallel for shared(dims, indexmap, xstart_i, ystart_i, zstart_i) private(i, ii, ii2, ij2, j, jj, k, kk) schedule(static)
#pragma cuda gpurun nocudafree(dims, indexmap)
#pragma cuda gpurun nog2cmemtr(dims, indexmap, xstart_i, ystart_i, zstart_i)
#pragma cuda ainfo kernelid(0) procname(compute_indexmap)
#pragma cuda gpurun cudafree(xstart_i, ystart_i, zstart_i)
#pragma cuda gpurun noc2gmemtr(indexmap)
hipLaunchKernelGGL(( compute_indexmap_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__dims, pitch__dims, ((int (*)[256][512])gpu__indexmap__main), gpu__xstart_i, gpu__ystart_i, gpu__zstart_i);
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__zstart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__ystart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__xstart_i));
/*
--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------
*/
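/*
c Note: ex[m] = exp(ap)^m = exp(-4.0e-6 * pi^2 * m); the table is sized for
c m up to niter*(NX*NX/4 + NY*NY/4 + NZ*NZ/4), the largest value that
c t*indexmap[k][j][i] can reach during the run.
*/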
/* #pragma omp single */
{
ap=(((( - 4.0)*1.0E-6)*3.141592653589793)*3.141592653589793);
ex[0]=1.0;
ex[1]=exp(ap);
#pragma loop name compute_indexmap#1
for (i=2; i<=(20*((((512*512)/4)+((256*256)/4))+((256*256)/4))); i ++ )
{
ex[i]=(ex[(i-1)]*ex[1]);
}
}
/* end single */
return ;
}
__global__ void compute_indexmap_clnd1_kernel0(int * dims, size_t pitch__dims, int indexmap[256][256][512], int * xstart_i, int * ystart_i, int * zstart_i)
{
int i;
int ii;
int ii2;
int ij2;
int j;
int jj;
int k;
int kk;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
i=_gtid;
if (i<( * (((int * )(((char * )dims)+(2*pitch__dims)))+0)))
{
#pragma loop name compute_indexmap#0#0
for (j=0; j<( * (((int * )(((char * )dims)+(2*pitch__dims)))+1)); j ++ )
{
#pragma loop name compute_indexmap#0#0#0
for (k=0; k<( * (((int * )(((char * )dims)+(2*pitch__dims)))+2)); k ++ )
{
ii=((((((i+1)+( * xstart_i))-2)+(512/2))%512)-(512/2));
ii2=(ii*ii);
jj=((((((j+1)+( * ystart_i))-2)+(256/2))%256)-(256/2));
ij2=((jj*jj)+ii2);
kk=((((((k+1)+( * zstart_i))-2)+(256/2))%256)-(256/2));
indexmap[k][j][i]=((kk*kk)+ij2);
}
}
}
}
static void compute_indexmap_clnd1(int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------
*/
int i;
double ap;
int xstart_i;
int ystart_i;
int zstart_i;
/*
--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n2, n) - n/2
c-------------------------------------------------------------------
*/
int * gpu__xstart_i;
int * gpu__ystart_i;
int * gpu__zstart_i;
xstart_i=xstart[2];
ystart_i=ystart[2];
zstart_i=zstart[2];
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__xstart_i)), gpuBytes));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)dims[2][0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(hipMemcpy(gpu__xstart_i, ( & xstart_i), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__ystart_i)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__ystart_i, ( & ystart_i), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__zstart_i)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__zstart_i, ( & zstart_i), gpuBytes, hipMemcpyHostToDevice));
#pragma omp parallel for shared(dims, indexmap, xstart_i, ystart_i, zstart_i) private(i, ii, ii2, ij2, j, jj, k, kk) schedule(static)
#pragma cuda gpurun noc2gmemtr(dims, indexmap)
#pragma cuda gpurun nocudamalloc(dims, indexmap)
#pragma cuda gpurun nocudafree(dims, indexmap)
#pragma cuda gpurun nog2cmemtr(dims, indexmap, xstart_i, ystart_i, zstart_i)
#pragma cuda ainfo kernelid(0) procname(compute_indexmap_clnd1)
#pragma cuda gpurun cudafree(xstart_i, ystart_i, zstart_i)
hipLaunchKernelGGL(( compute_indexmap_clnd1_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__dims, pitch__dims, ((int (*)[256][512])gpu__indexmap__main), gpu__xstart_i, gpu__ystart_i, gpu__zstart_i);
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__zstart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__ystart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__xstart_i));
/*
--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------
*/
/* #pragma omp single */
{
ap=(((( - 4.0)*1.0E-6)*3.141592653589793)*3.141592653589793);
ex[0]=1.0;
ex[1]=exp(ap);
#pragma loop name compute_indexmap#1
for (i=2; i<=(20*((((512*512)/4)+((256*256)/4))+((256*256)/4))); i ++ )
{
ex[i]=(ex[(i-1)]*ex[1]);
}
}
/* end single */
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void print_timers(void )
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
char * tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " } ;
#pragma loop name print_timers#0
for (i=0; i<7; i ++ )
{
if ((timer_read(i)!=0.0))
{
printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i));
}
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void fft(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd1(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd1(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd1(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
static void fft_clnd2_cloned0(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1_clnd2_cloned0(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd2_cloned0(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3_clnd2_cloned0(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd3_cloned0(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd3_cloned0(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd3_cloned0(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
static void fft_clnd1(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xin/xout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1_clnd4(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd4(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3_clnd4(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd5(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd5(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd5(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
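/*
c Note (inferred from the kernel below): each CUDA thread owns one k plane
c (k = global thread id). For every block of fftblock j-pencils it copies the
c data into its private scratch slice yy0[_gtid], calls dev_cfftz to transform
c along the first (i) dimension, and writes the result back to xout; yy1 is
c the companion work array used inside dev_cfftz.
*/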
__global__ void cffts1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMemcpy(gpu__fftblock, ( & fftblock), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMemcpy(gpu__fftblockpad, ( & fftblockpad), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_imag, u_imag, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_real, u_real, gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts1_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd5)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_clnd5_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts1_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_imag, u_imag, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_real, u_real, gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd4)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_clnd4_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts1_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun multisrccg(xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_clnd3_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts1_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_clnd2_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts1_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd1)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
hipLaunchKernelGGL(( cffts1_clnd1_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
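/*
 * cffts2_kernel0: one GPU thread per k-plane (k = global thread id, guarded by
 * k < d[2]). For each strip of fftblock columns along x, the thread gathers the
 * strip into its yy0 scratch slice, calls dev_cfftz to FFT along the second (y)
 * dimension (length d[1], log2 length logd_1), and scatters the result into xout.
 */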
__global__ void cffts2_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
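/*
 * cffts2: host wrapper for cffts2_kernel0. Copies d, is and logd[1] to the device,
 * sizes the grid to cover d[2] threads, allocates per-thread yy0/yy1 scratch,
 * launches the kernel, then copies the scratch back and frees the temporaries.
 * Both x and xout are bound to gpu__u1_*__main in the launch, so u1 is transformed
 * in place.
 */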
static void cffts2(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
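/*
 * The cffts2_clnd* kernels and wrappers below appear to be generated clones of
 * cffts2 for other call sites: the kernel bodies, the host-side
 * allocate/copy/launch/copy-back/free sequences and the launch arguments match the
 * base version, with only the procname in the "#pragma cuda ainfo" line changing.
 */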
__global__ void cffts2_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd5)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_clnd5_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts2_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd4)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_clnd4_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts2_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_clnd3_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts2_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_clnd2_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts2_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd1)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
hipLaunchKernelGGL(( cffts2_clnd1_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
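/*
 * cffts3_kernel0: one GPU thread per j-plane (j = global thread id, guarded by
 * j < d[1]). For each strip of fftblock columns along x, the thread gathers the
 * strip into yy0, calls dev_cfftz to FFT along the third (z) dimension (length
 * d[2], log2 length logd_2), and scatters the result into xout.
 */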
__global__ void cffts3_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
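/*
 * cffts3: host wrapper for cffts3_kernel0. Same allocate/copy/launch/copy-back/free
 * pattern as cffts2, but the grid is sized to cover d[1] threads and the launch
 * reads x from gpu__u1_*__main and writes xout to gpu__u0_*__main.
 */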
static void cffts3(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
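/*
 * cffts3_clnd5: generated clone of cffts3. It differs from the base version in two
 * ways: the wrapper re-copies u_real/u_imag to the device before the launch (its
 * noc2gmemtr clause omits them), and both x and xout are bound to gpu__u1_*__main,
 * so the result stays in u1 instead of going to u0.
 */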
__global__ void cffts3_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_imag, u_imag, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_real, u_real, gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd5)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_clnd5_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
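/*
 * cffts3_clnd4: generated clone of cffts3 with the same kernel body and the same
 * launch arguments as the base version (x from gpu__u1_*__main, xout to
 * gpu__u0_*__main).
 */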
__global__ void cffts3_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd4)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_clnd4_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts3_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_clnd3_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts3_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun multisrccg(xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_clnd2_cloned0_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
__global__ void cffts3_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__d, d, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMemcpy(gpu__fftblock, ( & fftblock), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMemcpy(gpu__fftblockpad, ( & fftblockpad), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__is, ( & is), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(hipMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_imag, u_imag, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(gpu__u_real, u_real, gpuBytes, hipMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd1)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
hipLaunchKernelGGL(( cffts3_clnd1_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_real, gpu__yy1_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_real, gpu__yy0_real, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, hipMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(hipFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(hipFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipFree(gpu__d));
return ;
}
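/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. Every cffts* host wrapper above sizes its launch the same way:
   one thread per j line (masked in the kernel by `if (j < d[1])`), blocks of
   gpuNumThreads threads, and a fall-back to a 2-D grid when the block count
   would exceed MAX_GDIMENSION. The helper below isolates that idiom; it
   assumes the translator-provided MAX_GDIMENSION/MAX_NDIMENSION and
   gpuNumThreads symbols, and the 1024.0F divisor simply mirrors the
   generated code above. */
#if 0
static dim3 grid_for_work_items_example(int work_items)
{
    int blocks = (int)ceil(((float)work_items) / 1024.0F);
    int bx, by;
    if (blocks > MAX_GDIMENSION) {
        by = (int)ceil(((float)blocks) / 10000.0F);  /* split over grid.y */
        bx = MAX_NDIMENSION;
    } else {
        by = 1;
        bx = blocks;
    }
    return dim3(bx, by, 1);   /* paired with dim3(gpuNumThreads, 1, 1) blocks */
}
#endif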
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void fft_init(int n)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute the roots-of-unity array that will be used for subsequent FFTs.
c-------------------------------------------------------------------
*/
int m;
int nu;
int ku;
int i;
int j;
int ln;
double t;
double ti;
/*
--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------
*/
nu=n;
m=ilog2(n);
u_real[0]=((double)m);
u_imag[0]=0.0;
ku=1;
ln=1;
#pragma loop name fft_init#0
for (j=1; j<=m; j ++ )
{
t=(3.141592653589793/ln);
#pragma loop name fft_init#0#0
for (i=0; i<=(ln-1); i ++ )
{
ti=(i*t);
u_real[(i+ku)]=cos(ti);
u_imag[(i+ku)]=sin(ti);
}
ku=(ku+ln);
ln=(2*ln);
}
return ;
}
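/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. fft_init() above packs the twiddle-factor table level by level:
   u[0] holds m = ilog2(n) in its real part, and level j (j = 1..m) stores
   ln = 2^(j-1) roots of unity exp(i*pi*k/ln), k = 0..ln-1, starting at
   offset ku = 2^(j-1). The helper below recomputes one entry from
   (level, k) to make that indexing explicit; the names are hypothetical. */
#if 0
static void example_u_entry(int level, int k, double *re, double *im)
{
    int ln = 1 << (level - 1);       /* number of entries on this level   */
    int ku = 1 << (level - 1);       /* first table index of this level   */
    double ti = k * (3.141592653589793 / ((double)ln));
    *re = cos(ti);                   /* equals u_real[ku + k] after init  */
    *im = sin(ti);                   /* equals u_imag[ku + k] after init  */
    (void)ku;
}
#endif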
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__device__ static void dev_cfftz(int is, int m, int n, double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int * fftblock, int * fftblockpad, double u_imag[512], double u_real[512], int _gtid)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------
*/
int i;
int j;
int l;
int mx;
/*
--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------
*/
int fftblock_0;
fftblock_0=( * fftblock);
mx=((int)u_real[0]);
/*
--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------
*/
#pragma loop name cfftz#0
for (l=1; l<=m; l+=2)
{
dev_fftz2(is, l, m, n, fftblock_0, ( * fftblockpad), u_real, u_imag, x_real, x_imag, y_real, y_imag, _gtid);
if ((l==m))
{
break;
}
dev_fftz2(is, (l+1), m, n, fftblock_0, ( * fftblockpad), u_real, u_imag, y_real, y_imag, x_real, x_imag, _gtid);
}
/*
--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------
*/
if (((m%2)==1))
{
#pragma loop name cfftz#1
for (j=0; j<n; j ++ )
{
#pragma loop name cfftz#1#0
for (i=0; i<fftblock_0; i ++ )
{
x_real[_gtid][j][i]=y_real[_gtid][j][i];
x_imag[_gtid][j][i]=y_imag[_gtid][j][i];
}
}
}
return ;
}
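/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. dev_cfftz() above performs m = ilog2(n) butterfly passes, two
   per loop iteration, ping-ponging between the x and y scratch arrays; the
   trailing copy runs only when m is odd, because after an odd number of
   passes the latest data sits in y rather than x. The toy loop below tracks
   just that buffer parity. */
#if 0
static int result_ends_up_in_y_example(int m)
{
    int in_y = 0;                /* data starts in x                    */
    int l;
    for (l = 1; l <= m; l++)
        in_y = !in_y;            /* each fftz2 pass swaps buffer roles  */
    return in_y;                 /* non-zero exactly when m is odd      */
}
#endif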
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__device__ static void dev_fftz2(int is, int l, int m, int n, int ny, int ny1, double u_real[512], double u_imag[512], double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int _gtid)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c-------------------------------------------------------------------
*/
int k;
int n1;
int li;
int lj;
int lk;
int ku;
int i;
int j;
int i11;
int i12;
int i21;
int i22;
double u1_real;
double u1_imag;
/*
--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------
*/
n1=(n/2);
if (((l-1)==0))
{
lk=1;
}
else
{
lk=(2<<((l-1)-1));
}
if (((m-l)==0))
{
li=1;
}
else
{
li=(2<<((m-l)-1));
}
lj=(2*lk);
ku=li;
#pragma loop name fftz2#0
for (i=0; i<li; i ++ )
{
i11=(i*lk);
i12=(i11+n1);
i21=(i*lj);
i22=(i21+lk);
if ((is>=1))
{
u1_real=u_real[(ku+i)];
u1_imag=u_imag[(ku+i)];
}
else
{
u1_real=u_real[(ku+i)];
u1_imag=( - u_imag[(ku+i)]);
}
/*
--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------
*/
#pragma loop name fftz2#0#0
for (k=0; k<lk; k ++ )
{
#pragma loop name fftz2#0#0#0
for (j=0; j<ny; j ++ )
{
double x11real;
double x11imag;
double x21real;
double x21imag;
x11real=x_real[_gtid][(i11+k)][j];
x11imag=x_imag[_gtid][(i11+k)][j];
x21real=x_real[_gtid][(i12+k)][j];
x21imag=x_imag[_gtid][(i12+k)][j];
y_real[_gtid][(i21+k)][j]=(x11real+x21real);
y_imag[_gtid][(i21+k)][j]=(x11imag+x21imag);
y_real[_gtid][(i22+k)][j]=((u1_real*(x11real-x21real))-(u1_imag*(x11imag-x21imag)));
y_imag[_gtid][(i22+k)][j]=((u1_real*(x11imag-x21imag))+(u1_imag*(x11real-x21real)));
}
}
}
return ;
}
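/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. The inner loops of dev_fftz2() above apply one radix-2
   butterfly per (k, j) element: the first output is the sum of the two
   inputs, and the second is the twiddle factor u1 times their difference
   (u1 is conjugated when is < 1, i.e. for the inverse transform). The
   scalar helper below spells out that complex arithmetic; all names are
   hypothetical. */
#if 0
static void butterfly_example(double u1_re, double u1_im,
                              double a_re, double a_im,  /* x[i11+k]      */
                              double b_re, double b_im,  /* x[i12+k]      */
                              double *sum_re, double *sum_im, /* y[i21+k] */
                              double *tw_re,  double *tw_im)  /* y[i22+k] */
{
    *sum_re = a_re + b_re;
    *sum_im = a_im + b_im;
    *tw_re  = u1_re * (a_re - b_re) - u1_im * (a_im - b_im);
    *tw_im  = u1_re * (a_im - b_im) + u1_im * (a_re - b_re);
}
#endif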
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static int ilog2(int n)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int nn;
int lg;
int _ret_val_0;
if ((n==1))
{
_ret_val_0=0;
return _ret_val_0;
}
lg=1;
nn=2;
while (nn<n)
{
nn=(nn<<1);
lg ++ ;
}
return lg;
}
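/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. ilog2() above counts the doublings needed to reach at least n,
   so it is the exact base-2 logarithm for the power-of-two grid sizes used
   here and rounds up otherwise. The checks below assume <assert.h>. */
#if 0
#include <assert.h>
static void ilog2_example(void)
{
    assert(ilog2(1)   == 0);   /* special-cased early return              */
    assert(ilog2(256) == 8);   /* exact for powers of two                 */
    assert(ilog2(512) == 9);
    assert(ilog2(5)   == 3);   /* rounds up: the doubling stops at nn = 8 */
}
#endif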
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void checksum_kernel0(double * red__chk_imag, double * red__chk_real, double u1_imag[256][256][512], double u1_real[256][256][512], int * xend, int * xstart, int * yend, int * ystart, int * zend, int * zstart)
{
__shared__ double sh__chk_imag[BLOCK_SIZE];
__shared__ double sh__chk_real[BLOCK_SIZE];
int xstart_0;
int ystart_0;
int zstart_0;
int j;
int q;
int r;
int s;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
zstart_0=zstart[0];
ystart_0=ystart[0];
xstart_0=xstart[0];
sh__chk_real[threadIdx.x]=0.0F;
sh__chk_imag[threadIdx.x]=0.0F;
j=(_gtid+1);
#pragma omp for nowait
if (j<=1024)
{
q=((j%512)+1);
if (((q>=xstart_0)&&(q<=xend[0])))
{
r=(((3*j)%256)+1);
if (((r>=ystart_0)&&(r<=yend[0])))
{
s=(((5*j)%256)+1);
if (((s>=zstart_0)&&(s<=zend[0])))
{
/* cadd is a macro in npb-C.h adding the real and imaginary */
/* component. So, the preprocessed statement still follows the */
/* reduction pattern */
/* cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); */
sh__chk_real[threadIdx.x]=(sh__chk_real[threadIdx.x]+u1_real[(s-zstart_0)][(r-ystart_0)][(q-xstart_0)]);
sh__chk_imag[threadIdx.x]=(sh__chk_imag[threadIdx.x]+u1_imag[(s-zstart_0)][(r-ystart_0)][(q-xstart_0)]);
}
}
}
}
__syncthreads();
if ((threadIdx.x<256))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+256)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+256)];
}
__syncthreads();
if ((threadIdx.x<128))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+128)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+128)];
}
__syncthreads();
if ((threadIdx.x<64))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+64)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+64)];
}
__syncthreads();
if ((threadIdx.x<32))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+32)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+32)];
}
if ((threadIdx.x<16))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+16)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+16)];
}
if ((threadIdx.x<8))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+8)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+8)];
}
if ((threadIdx.x<4))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+4)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+4)];
}
if ((threadIdx.x<2))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+2)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+2)];
}
if ((threadIdx.x<1))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+1)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+1)];
}
if ((threadIdx.x==0))
{
red__chk_imag[_bid]=sh__chk_imag[0];
red__chk_real[_bid]=sh__chk_real[0];
}
}
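/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. checksum_kernel0 above accumulates one partial value per
   thread in shared memory, folds the block in halves, and has thread 0
   publish one partial sum per block; the host then adds the per-block
   partials (see the chk_*__extended loops in checksum() below). Note that
   the last five folds above omit __syncthreads() and rely on implicit
   warp-level synchronization, which newer architectures no longer
   guarantee. The kernel below shows the same tree reduction for a single
   double per thread; the names are hypothetical. */
#if 0
__global__ void block_sum_example(const double *in, double *per_block_out)
{
    __shared__ double sh[BLOCK_SIZE];
    int tid = threadIdx.x;
    sh[tid] = in[blockIdx.x * blockDim.x + tid];
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            sh[tid] += sh[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        per_block_out[blockIdx.x] = sh[0];  /* one partial sum per block */
}
#endif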
static void checksum(int i, double u1_real[256][256][512], double u1_imag[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
double _chk_real;
double _chk_imag;
double * red__chk_imag;
double * chk_imag__extended;
int _ti_100_0;
double * red__chk_real;
double * chk_real__extended;
_chk_real=0.0;
_chk_imag=0.0;
{
double chk_real = _chk_real;
double chk_imag = _chk_imag;
/* #pragma omp for nowait */
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=1;
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=1;
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & red__chk_imag)), gpuBytes));
chk_imag__extended=((double * )malloc(gpuBytes));
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(hipMalloc(((void * * )( & red__chk_real)), gpuBytes));
chk_real__extended=((double * )malloc(gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__xend, xend, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__xstart, xstart, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__yend, yend, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__ystart, ystart, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__zend, zend, gpuBytes, hipMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(hipMemcpy(gpu__zstart, zstart, gpuBytes, hipMemcpyHostToDevice));
#pragma omp parallel shared(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart) private(j, q, r, s) reduction(+: chk_imag, chk_real) schedule(static)
#pragma cuda gpurun noc2gmemtr(u1_imag, u1_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(u1_imag, u1_real)
#pragma cuda gpurun nocudafree(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart)
#pragma cuda gpurun multisrccg(xend, xstart, yend, ystart, zend, zstart)
#pragma cuda gpurun nog2cmemtr(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart)
#pragma cuda ainfo kernelid(0) procname(checksum)
#pragma cuda gpurun registerRO(xstart[0], ystart[0], zstart[0])
hipLaunchKernelGGL(( checksum_kernel0), dim3(dimGrid0), dim3(dimBlock0), 0, 0, red__chk_imag, red__chk_real, ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), gpu__xend, gpu__xstart, gpu__yend, gpu__ystart, gpu__zend, gpu__zstart);
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(chk_real__extended, red__chk_real, gpuBytes, hipMemcpyDeviceToHost));
for (_ti_100_0=0; _ti_100_0<gpuNumBlocks; _ti_100_0 ++ )
{
chk_real+=chk_real__extended[_ti_100_0];
}
free(chk_real__extended);
CUDA_SAFE_CALL(hipFree(red__chk_real));
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(hipMemcpy(chk_imag__extended, red__chk_imag, gpuBytes, hipMemcpyDeviceToHost));
for (_ti_100_0=0; _ti_100_0<gpuNumBlocks; _ti_100_0 ++ )
{
chk_imag+=chk_imag__extended[_ti_100_0];
}
free(chk_imag__extended);
CUDA_SAFE_CALL(hipFree(red__chk_imag));
_chk_real=chk_real;
_chk_imag=chk_imag;
}
/* #pragma omp critical */
{
sums_real[i]+=_chk_real;
sums_imag[i]+=_chk_imag;
}
/* #pragma omp barrier */
/* #pragma omp single */
{
/* complex % real */
sums_real[i]=(sums_real[i]/((double)33554432));
sums_imag[i]=(sums_imag[i]/((double)33554432));
printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums_real[i], sums_imag[i]);
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void verify(int d1, int d2, int d3, int nt, int * verified, char * cclass)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
double err;
double epsilon;
/*
--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_s[(6+1)] = { 0.0, 554.6087004964, 554.6385409189, 554.6148406171, 554.5423607415, 554.4255039624, 554.2683411902 } ;
double vdata_imag_s[(6+1)] = { 0.0, 484.5363331978, 486.5304269511, 488.3910722336, 490.1273169046, 491.7475857993, 493.2597244941 } ;
/*
--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_w[(6+1)] = { 0.0, 567.3612178944, 563.1436885271, 559.402408997, 556.069804702, 553.089899125, 550.4159734538 } ;
double vdata_imag_w[(6+1)] = { 0.0, 529.3246849175, 528.2149986629, 527.0996558037, 526.0027904925, 524.9400845633, 523.9212247086 } ;
/*
--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_a[(6+1)] = { 0.0, 504.6735008193, 505.9412319734, 506.9376896287, 507.7892868474, 508.5233095391, 509.1487099959 } ;
double vdata_imag_a[(6+1)] = { 0.0, 511.404790551, 509.8809666433, 509.8144042213, 510.1336130759, 510.4914655194, 510.7917842803 } ;
/*
--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_b[(20+1)] = { 0.0, 517.7643571579, 515.4521291263, 514.6409228649, 514.2378756213, 513.9626667737, 513.7423460082, 513.5547056878, 513.3910925466, 513.247070539, 513.1197729984, 513.0070319283, 512.9070537032, 512.8182883502, 512.7393733383, 512.669106202, 512.6064276004, 512.550407657, 512.500233172, 512.4551951846, 512.4146770029 } ;
double vdata_imag_b[(20+1)] = { 0.0, 507.7803458597, 508.8249431599, 509.6208912659, 510.1023387619, 510.3976610617, 510.5948019802, 510.7404165783, 510.8576573661, 510.9577278523, 511.0460304483, 511.12524338, 511.1968077718, 511.2616233064, 511.3203605551, 511.3735928093, 511.4218460548, 511.465613976, 511.5053595966, 511.5415130407, 511.5744692211 } ;
/*
--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_c[(20+1)] = { 0.0, 519.5078707457, 515.5422171134, 514.4678022222, 514.0150594328, 513.755042681, 513.5811056728, 513.4569343165, 513.3651975661, 513.2955192805, 513.2410471738, 513.1971141679, 513.1605205716, 513.1290734194, 513.1012720314, 513.0760908195, 513.0528295923, 513.0310107773, 513.0103090133, 512.9905029333, 512.9714421109 } ;
double vdata_imag_c[(20+1)] = { 0.0, 514.9019699238, 512.7578201997, 512.2251847514, 512.1090289018, 512.1143685824, 512.1496764568, 512.1870921893, 512.2193250322, 512.2454735794, 512.2663649603, 512.2830879827, 512.2965869718, 512.3075927445, 512.3166486553, 512.3241541685, 512.3304037599, 512.3356167976, 512.3399592211, 512.3435588985, 512.3465164008 } ;
epsilon=1.0E-12;
( * verified)=1;
( * cclass)='U';
if (((((d1==64)&&(d2==64))&&(d3==64))&&(nt==6)))
{
( * cclass)='S';
#pragma loop name verify#0
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_s[i])/vdata_real_s[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_s[i])/vdata_imag_s[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==128)&&(d2==128))&&(d3==32))&&(nt==6)))
{
( * cclass)='W';
#pragma loop name verify#1
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_w[i])/vdata_real_w[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_w[i])/vdata_imag_w[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==256)&&(d2==256))&&(d3==128))&&(nt==6)))
{
( * cclass)='A';
#pragma loop name verify#2
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_a[i])/vdata_real_a[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_a[i])/vdata_imag_a[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==512)&&(d2==256))&&(d3==256))&&(nt==20)))
{
( * cclass)='B';
#pragma loop name verify#3
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_b[i])/vdata_real_b[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_b[i])/vdata_imag_b[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==512)&&(d2==512))&&(d3==512))&&(nt==20)))
{
( * cclass)='C';
#pragma loop name verify#4
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_c[i])/vdata_real_c[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_c[i])/vdata_imag_c[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
}
}
}
}
if ((( * cclass)!='U'))
{
printf("Result verification successful\n");
}
else
{
printf("Result verification failed\n");
}
printf("cclass = %1c\n", ( * cclass));
return ;
}
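/* Editor's note -- illustrative sketch only, not part of the translated
   benchmark. verify() above accepts a run when, for every checksum
   iteration, the relative error against the tabulated reference value stays
   within epsilon = 1.0e-12. The helper below is that acceptance test in
   isolation; the name is hypothetical. */
#if 0
static int checksum_ok_example(double got, double ref, double epsilon)
{
    double err = (got - ref) / ref;
    return fabs(err) <= epsilon;   /* verify() rejects when fabs(err) > epsilon */
}
#endif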
| 705525f98f4b8adf3141ac84c3ffd227981607b4.cu | /*
--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - FT
This benchmark is an OpenMP C version of the NPB FT code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to [email protected]
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
Authors: D. Bailey
W. Saphir
OpenMP C version: S. Satoh
--------------------------------------------------------------------
*/
#include "npb-C.h"
/* global variables */
#include "global.h"
#ifndef __O2G_HEADER__
#define __O2G_HEADER__
/******************************************/
/* Added codes for OpenMP2GPU translation */
/******************************************/
#include <cutil.h>
#include <math.h>
#define MAX(a,b) (((a) > (b)) ? (a) : (b))
static int gpuNumThreads = BLOCK_SIZE;
static int gpuNumBlocks;
static int gpuNumBlocks1;
static int gpuNumBlocks2;
static int totalNumThreads;
unsigned int gpuGmemSize = 0;
unsigned int gpuSmemSize = 0;
static unsigned int gpuBytes = 0;
#endif
/* End of __O2G_HEADER__ */
int * gpu__fftblock;
int * gpu__fftblockpad;
double * gpu__u_imag;
double * gpu__u_real;
double * gpu__u1_imag__main;
double * gpu__u1_real__main;
double * gpu__u0_imag__main;
double * gpu__u0_real__main;
double * gpu__u2_imag__main;
double * gpu__u2_real__main;
int * gpu__xend;
int * gpu__xstart;
int * gpu__yend;
int * gpu__ystart;
int * gpu__zend;
int * gpu__zstart;
int * gpu__dims;
size_t pitch__dims;
int * gpu__indexmap__main;
double * gpu__ex;
static double tmp__compute_initial_conditions[(((512*2)*512)+1)];
static double yy0_real[512][18];
static double yy0_imag[512][18];
static double yy1_real[512][18];
static double yy1_imag[512][18];
#pragma omp threadprivate(yy0_real)
#pragma omp threadprivate(yy0_imag)
#pragma omp threadprivate(yy1_real)
#pragma omp threadprivate(yy1_imag)
/* function declarations */
static void evolve_cloned0(double u0_real[256][256][512], double u0_imag[256][256][512], double u1_real[256][256][512], double u1_imag[256][256][512], int t, int indexmap[256][256][512], int d[3]);
static void compute_initial_conditions(double u0_real[256][256][512], double u0_imag[256][256][512], int d[3]);
static void ipow46(double a, int exponent, double * result);
static void setup(void );
static void compute_indexmap(int indexmap[256][256][512], int d[3]);
static void compute_indexmap_clnd1(int indexmap[256][256][512], int d[3]);
static void print_timers(void );
static void fft(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void fft_clnd2_cloned0(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void fft_clnd1(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512]);
static void cffts1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts1_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts2_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void cffts3_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18]);
static void fft_init(int n);
__device__ static void dev_cfftz(int is, int m, int n, double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int * fftblock, int * fftblockpad, double u_imag[512], double u_real[512], int _gtid);
__device__ static void dev_fftz2(int is, int l, int m, int n, int ny, int ny1, double u_real[512], double u_imag[512], double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int _gtid);
static int ilog2(int n);
static void checksum(int i, double u1_real[256][256][512], double u1_imag[256][256][512], int d[3]);
static void verify(int d1, int d2, int d3, int nt, int * verified, char * cclass);
/* */
/* E L A P S E D _ T I M E */
/* */
double elapsed_time(void )
{
double t;
wtime(( & t));
return t;
}
double start[64];
double elapsed[64];
/* */
/* T I M E R _ C L E A R */
/* */
void timer_clear(int n)
{
elapsed[n]=0.0;
return ;
}
/* */
/* T I M E R _ S T A R T */
/* */
void timer_start(int n)
{
start[n]=elapsed_time();
return ;
}
/* */
/* T I M E R _ S T O P */
/* */
void timer_stop(int n)
{
double t;
double now;
now=elapsed_time();
t=(now-start[n]);
elapsed[n]+=t;
return ;
}
/* */
/* T I M E R _ R E A D */
/* */
double timer_read(int n)
{
double _ret_val_0;
_ret_val_0=elapsed[n];
return _ret_val_0;
}
static void c_print_results(char * name, char cccccclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char * optype, int passed_verification, char * npbversion, char * compiletime, char * cc, char * clink, char * c_lib, char * c_inc, char * cflags, char * clinkflags, char * rand)
{
printf("\n\n %s Benchmark Completed\n", name);
printf(" Class = %c\n", cccccclass);
/* as in IS */
if (((n2==0)&&(n3==0)))
{
printf(" Size = %12d\n", n1);
}
else
{
printf(" Size = %3dx%3dx%3d\n", n1, n2, n3);
}
printf(" Iterations = %12d\n", niter);
printf(" Threads = %12d\n", nthreads);
printf(" Time in seconds = %12.2f\n", t);
printf(" Mop/s total = %12.2f\n", mops);
printf(" Operation type = %24s\n", optype);
if (passed_verification)
{
printf(" Verification = SUCCESSFUL\n");
}
else
{
printf(" Verification = UNSUCCESSFUL\n");
}
printf(" Version = %12s\n", npbversion);
printf(" Compile date = %12s\n", compiletime);
printf("\n Compile options:\n");
printf(" CC = %s\n", cc);
printf(" CLINK = %s\n", clink);
printf(" C_LIB = %s\n", c_lib);
printf(" C_INC = %s\n", c_inc);
printf(" CFLAGS = %s\n", cflags);
printf(" CLINKFLAGS = %s\n", clinkflags);
printf(" RAND = %s\n", rand);
/*
printf( "\n\n" );
printf( " Please send the results of this run to:\n\n" );
printf( " NPB Development Team\n" );
printf( " Internet: [email protected]\n \n" );
printf( " If email is not available, send this to:\n\n" );
printf( " MS T27A-1\n" );
printf( " NASA Ames Research Center\n" );
printf( " Moffett Field, CA 94035-1000\n\n" );
printf( " Fax: 415-604-3957\n\n" );
*/
return ;
}
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
double randlc(double * x, double a)
{
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
/*
c---------------------------------------------------------------------
c
c This routine returns a uniform pseudorandom double precision number in the
c range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The returned value RANDLC is normalized to be
c between 0 and 1, i.e. RANDLC = 2^(-46) x_1. X is updated to contain
c the new seed x_1, so that subsequent calls to RANDLC using the same
c arguments will generate a continuous sequence.
c
c This routine should produce the same results on any computer with at least
c 48 mantissa bits in double precision floating point data. On 64 bit
c systems, double precision should be disabled.
c
c David H. Bailey October 26, 1990
c
c---------------------------------------------------------------------
*/
double t1;
double t2;
double t3;
double t4;
double a1;
double a2;
double x1;
double x2;
double z;
/*
c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 A1 + A2.
c---------------------------------------------------------------------
*/
double _ret_val_0;
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*a);
a1=((int)t1);
a2=(a-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*a1));
/*
c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*( * x));
x1=((int)t1);
x2=(( * x)-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*x1));
t1=((a1*x2)+(a2*x1));
t2=((int)(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*t1));
z=(t1-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*t2));
t3=((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*z)+(a2*x2));
t4=((int)((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*t3));
( * x)=(t3-((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0))*t4));
_ret_val_0=((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*( * x));
return _ret_val_0;
}
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
void vranlc(int n, double * x_seed, double a, double y[])
{
/*
c---------------------------------------------------------------------
c---------------------------------------------------------------------
*/
/*
c---------------------------------------------------------------------
c
c This routine generates N uniform pseudorandom double precision numbers in
c the range (0, 1) by using the linear congruential generator
c
c x_{k+1} = a x_k (mod 2^46)
c
c where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
c before repeating. The argument A is the same as 'a' in the above formula,
c and X is the same as x_0. A and X must be odd double precision integers
c in the range (1, 2^46). The N results are placed in Y and are normalized
c to be between 0 and 1. X is updated to contain the new seed, so that
c subsequent calls to VRANLC using the same arguments will generate a
c continuous sequence. If N is zero, only initialization is performed, and
c the variables X, A and Y are ignored.
c
c This routine is the standard version designed for scalar or RISC systems.
c However, it should produce the same results on any single processor
c computer with at least 48 mantissa bits in double precision floating point
c data. On 64 bit systems, double precision should be disabled.
c
c---------------------------------------------------------------------
*/
int i;
double x;
double t1;
double t2;
double t3;
double t4;
double a1;
double a2;
double x1;
double x2;
double z;
/*
c---------------------------------------------------------------------
c Break A into two parts such that A = 2^23 A1 + A2.
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*a);
a1=((int)t1);
a2=(a-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*a1));
x=( * x_seed);
/*
c---------------------------------------------------------------------
c Generate N results. This loop is not vectorizable.
c---------------------------------------------------------------------
*/
#pragma loop name vranlc#0
for (i=1; i<=n; i ++ )
{
/*
c---------------------------------------------------------------------
c Break X into two parts such that X = 2^23 X1 + X2, compute
c Z = A1 * X2 + A2 * X1 (mod 2^23), and then
c X = 2^23 * Z + A2 * X2 (mod 2^46).
c---------------------------------------------------------------------
*/
t1=(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*x);
x1=((int)t1);
x2=(x-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*x1));
t1=((a1*x2)+(a2*x1));
t2=((int)(((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*t1));
z=(t1-(((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*t2));
t3=((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*z)+(a2*x2));
t4=((int)((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*t3));
x=(t3-((((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*((((((((((((((((((((((2.0*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0)*2.0))*t4));
y[i]=((((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*((((((((((((((((((((((0.5*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5)*0.5))*x);
}
( * x_seed)=x;
return ;
}
int main(int argc, char * * argv)
{
/*
c-------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
/*
------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accommodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c - u0 contains the (transformed) initial condition
c - u1 and u2 are working arrays
c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c time evolution operator.
c-----------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------
*/
static double u0_real[256][256][512];
static double u0_imag[256][256][512];
static double u1_real[256][256][512];
static double u1_imag[256][256][512];
static double u2_real[256][256][512];
static double u2_imag[256][256][512];
static int indexmap[256][256][512];
int iter;
int nthreads = 1;
double total_time;
double mflops;
int verified;
char cclass;
/*
--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------
*/
int _ret_val_0 = 0; /* main's return status; left uninitialized by the translator */
////////////////////////////////
// CUDA Device Initialization //
////////////////////////////////
int deviceCount;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "cutil error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaDeviceProp deviceProp;
CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev));
fprintf(stderr, "Using device %d: %s\n", dev, deviceProp.name);
CUDA_SAFE_CALL(cudaSetDevice(dev));
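/* The cudaMalloc calls below create device-resident copies of the large
   shared arrays (u0/u1/u2, indexmap, the ex[] exponent table, the FFT
   roots u_real/u_imag, and the geometry descriptors) once, up front, so
   they persist across all kernel launches instead of being reallocated
   per call ("Allocate GPU variables as global ones" in the configuration
   report printed at the end of main). */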
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__fftblock)), gpuBytes));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__fftblockpad)), gpuBytes));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u_imag)), gpuBytes));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u_real)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u1_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u1_real__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u0_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u0_real__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u2_imag__main)), gpuBytes));
gpuBytes=(((256*256)*512)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__u2_real__main)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__xend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__xstart)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__ystart)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__zend)), gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__zstart)), gpuBytes));
CUDA_SAFE_CALL(cudaMallocPitch(((void * * )( & gpu__dims)), ( & pitch__dims), (3*sizeof (int)), 3));
gpuBytes=(((256*256)*512)*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__indexmap__main)), gpuBytes));
gpuBytes=(((20*((((512*512)/4)+((256*256)/4))+((256*256)/4)))+1)*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__ex)), gpuBytes));
#pragma loop name main#0
for (i=0; i<7; i ++ )
{
timer_clear(i);
}
setup();
/* #pragma omp parallel */
{
compute_indexmap(indexmap, dims[2]);
/* #pragma omp single */
{
compute_initial_conditions(u1_real, u1_imag, dims[0]);
fft_init(dims[0][0]);
}
fft(1, u1_real, u1_imag, u0_real, u0_imag);
}
/* end parallel */
/*
--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------
*/
#pragma loop name main#1
for (i=0; i<7; i ++ )
{
timer_clear(i);
}
timer_start(0);
if ((0==1))
{
timer_start(1);
}
/* #pragma omp parallel private(iter) firstprivate(niter) */
{
compute_indexmap_clnd1(indexmap, dims[2]);
/* #pragma omp single */
{
compute_initial_conditions(u1_real, u1_imag, dims[0]);
fft_init(dims[0][0]);
}
if ((0==1))
{
/* #pragma omp master */
timer_stop(1);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(2);
}
fft_clnd1(1, u1_real, u1_imag, u0_real, u0_imag);
if ((0==1))
{
/* #pragma omp master */
timer_stop(2);
}
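/* Main time-stepping loop: each iteration scales the spectrum by the
   time-evolution factors (evolve), applies the inverse 3-D FFT, and
   accumulates the checksum used for verification. */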
#pragma loop name main#2
for (iter=1; iter<=niter; iter ++ )
{
if ((0==1))
{
/* #pragma omp master */
timer_start(3);
}
evolve_cloned0(u0_real, u0_imag, u1_real, u1_imag, iter, indexmap, dims[0]);
if ((0==1))
{
/* #pragma omp master */
timer_stop(3);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(2);
}
fft_clnd2_cloned0(( - 1), u1_real, u1_imag, u2_real, u2_imag);
if ((0==1))
{
/* #pragma omp master */
timer_stop(2);
}
if ((0==1))
{
/* #pragma omp master */
timer_start(4);
}
checksum(iter, u2_real, u2_imag, dims[0]);
if ((0==1))
{
/* #pragma omp master */
timer_stop(4);
}
}
/* #pragma omp single */
verify(512, 256, 256, niter, ( & verified), ( & cclass));
}
/* end parallel */
timer_stop(0);
total_time=timer_read(0);
if ((total_time!=0.0))
{
mflops=(((1.0E-6*((double)33554432))*((14.8157+(7.19641*log(((double)33554432))))+((5.23518+(7.21113*log(((double)33554432))))*niter)))/total_time);
}
else
{
mflops=0.0;
}
c_print_results("FT", cclass, 512, 256, 256, niter, nthreads, total_time, mflops, " floating point", verified, "2.3", "20 Feb 2012", "gcc", "gcc", "-lm", "-I../common", "-O3 ", "(none)", "randdp");
if ((0==1))
{
print_timers();
}
printf("/***********************/ \n/* Input Configuration */ \n/***********************/ \n");
printf("====> GPU Block Size: 1024 \n");
printf("/**********************/ \n/* Used Optimizations */ \n/**********************/ \n");
printf("====> MallocPitch Opt is used.\n");
printf("====> MatrixTranspose Opt is used.\n");
printf("====> ParallelLoopSwap Opt is used.\n");
printf("====> LoopCollapse Opt is used.\n");
printf("====> Unrolling-on-reduction Opt is used.\n");
printf("====> Allocate GPU variables as global ones.\n");
printf("====> Optimize globally allocated GPU variables .\n");
printf("====> CPU-GPU Mem Transfer Opt Level: 4\n");
printf("====> Cuda Malloc Opt Level: 1\n");
printf("====> Assume that all loops have non-zero iterations.\n");
printf("====> Cache shared scalar variables onto GPU registers.\n");
printf("====> Cache shared array elements onto GPU registers.\n");
printf("====> Cache private array variables onto GPU shared memory.\n");
printf("====> local array reduction variable configuration = 1\n");
CUDA_SAFE_CALL(cudaFree(gpu__fftblock));
CUDA_SAFE_CALL(cudaFree(gpu__fftblockpad));
CUDA_SAFE_CALL(cudaFree(gpu__u_imag));
CUDA_SAFE_CALL(cudaFree(gpu__u_real));
CUDA_SAFE_CALL(cudaFree(gpu__u1_imag__main));
CUDA_SAFE_CALL(cudaFree(gpu__u1_real__main));
CUDA_SAFE_CALL(cudaFree(gpu__u0_imag__main));
CUDA_SAFE_CALL(cudaFree(gpu__u0_real__main));
CUDA_SAFE_CALL(cudaFree(gpu__u2_imag__main));
CUDA_SAFE_CALL(cudaFree(gpu__u2_real__main));
CUDA_SAFE_CALL(cudaFree(gpu__xend));
CUDA_SAFE_CALL(cudaFree(gpu__xstart));
CUDA_SAFE_CALL(cudaFree(gpu__yend));
CUDA_SAFE_CALL(cudaFree(gpu__ystart));
CUDA_SAFE_CALL(cudaFree(gpu__zend));
CUDA_SAFE_CALL(cudaFree(gpu__zstart));
CUDA_SAFE_CALL(cudaFree(gpu__dims));
CUDA_SAFE_CALL(cudaFree(gpu__indexmap__main));
CUDA_SAFE_CALL(cudaFree(gpu__ex));
fflush(stdout);
fflush(stderr);
return _ret_val_0;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void evolve_cloned0_kernel0(int * d, double * ex, int indexmap[256][256][512], int * t, double u0_imag[256][256][512], double u0_real[256][256][512], double u1_imag[256][256][512], double u1_real[256][256][512])
{
double ex_0;
int t_0;
int i;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
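/* _gtid is the global thread index flattened from the (possibly 2-D)
   grid; each thread handles one i index and loops over j and k. */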
i=_gtid;
t_0=( * t);
if (i<d[0])
{
#pragma loop name evolve#0#0
for (j=0; j<d[1]; j ++ )
{
#pragma loop name evolve#0#0#0
for (k=0; k<d[2]; k ++ )
{
ex_0=ex[(t_0*indexmap[k][j][i])];
u1_real[k][j][i]=(u0_real[k][j][i]*ex_0);
u1_imag[k][j][i]=(u0_imag[k][j][i]*ex_0);
}
}
}
}
static void evolve_cloned0(double u0_real[256][256][512], double u0_imag[256][256][512], double u1_real[256][256][512], double u1_imag[256][256][512], int t, int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space
c-------------------------------------------------------------------
*/
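/* Each spectral element is scaled by ex[t*indexmap[k][j][i]], i.e. by
   exp(ap*t*(kx^2+ky^2+kz^2)) with ap = -4*1.0e-6*pi^2, using the ex[]
   table built in compute_indexmap. */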
int * gpu__d;
int * gpu__t;
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
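/* Launch configuration: one thread per i index with gpuNumThreads (1024,
   per the block-size report) threads per block; if the required block
   count exceeds the grid x-dimension limit, the grid is split into a 2-D
   (x,y) layout. The same pattern is repeated before every kernel launch
   in this file. */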
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(((20*((((512*512)/4)+((256*256)/4))+((256*256)/4)))+1)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__ex, ex, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__t)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__t, ( & t), gpuBytes, cudaMemcpyHostToDevice));
#pragma omp parallel for shared(d, ex, indexmap, t, u0_imag, u0_real, u1_imag, u1_real) private(i, j, k)
#pragma cuda gpurun noc2gmemtr(indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun nocudamalloc(indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun nocudafree(ex, indexmap, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda gpurun multisrccg(ex)
#pragma cuda gpurun nog2cmemtr(d, ex, indexmap, t, u0_imag, u0_real, u1_imag, u1_real)
#pragma cuda ainfo kernelid(0) procname(evolve_cloned0)
#pragma cuda gpurun registerRO(ex[(t*indexmap[k][j][i])], t)
#pragma cuda gpurun cudafree(d, t)
evolve_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__ex, ((int (*)[256][512])gpu__indexmap__main), gpu__t, ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__t));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void compute_initial_conditions(double u0_real[256][256][512], double u0_imag[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------
*/
int k;
double x0;
double start;
double an;
double dummy;
int i;
int j;
int t;
start=3.14159265E8;
/*
--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------
*/
ipow46(1.220703125E9, (((((zstart[0]-1)*2)*512)*256)+(((ystart[0]-1)*2)*512)), ( & an));
dummy=randlc(( & start), an);
ipow46(1.220703125E9, ((2*512)*256), ( & an));
/*
--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------
*/
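/* Each z-plane is filled with 2*512*dims[0][1] pseudo-random values from
   vranlc (real and imaginary parts interleaved in the tmp buffer), and
   the plane seed is advanced by the precomputed factor an between planes. */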
#pragma loop name compute_initial_conditions#0
for (k=0; k<dims[0][2]; k ++ )
{
x0=start;
vranlc(((2*512)*dims[0][1]), ( & x0), 1.220703125E9, tmp__compute_initial_conditions);
t=1;
#pragma loop name compute_initial_conditions#0#0
for (j=0; j<dims[0][1]; j ++ )
{
#pragma loop name compute_initial_conditions#0#0#0
for (i=0; i<512; i ++ )
{
u0_real[k][j][i]=tmp__compute_initial_conditions[(t ++ )];
u0_imag[k][j][i]=tmp__compute_initial_conditions[(t ++ )];
}
}
if ((k!=dims[0][2]))
{
dummy=randlc(( & start), an);
}
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void ipow46(double a, int exponent, double * result)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------
*/
double dummy;
double q;
double r;
int n;
int n2;
/*
--------------------------------------------------------------------
c Use
c a^n = a^(n/2)*a^(n/2) if n even else
c a^n = a*a^(n-1) if n odd
c-------------------------------------------------------------------
*/
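/* Square-and-multiply: randlc(&q, q) squares q modulo 2^46 and
   randlc(&r, q) multiplies the accumulated result r by q modulo 2^46,
   so the loop takes O(log exponent) steps. */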
( * result)=1;
if ((exponent==0))
{
return ;
}
q=a;
r=1;
n=exponent;
while (n>1)
{
n2=(n/2);
if (((n2*2)==n))
{
dummy=randlc(( & q), q);
n=n2;
}
else
{
dummy=randlc(( & r), q);
n=(n-1);
}
}
dummy=randlc(( & r), q);
( * result)=r;
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void setup(void )
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"" - FT Benchmark\n\n");
niter=20;
printf(" Size : %3dx%3dx%3d\n", 512, 256, 256);
printf(" Iterations : %7d\n", niter);
/*
1004 format(' Number of processes : ', i7)
1005 format(' Processor array : ', i3, 'x', i3)
1006 format(' WARNING: compiled for ', i5, ' processes. ',
> ' Will not verify. ')
*/
#pragma loop name setup#0
for (i=0; i<3; i ++ )
{
dims[i][0]=512;
dims[i][1]=256;
dims[i][2]=256;
}
#pragma loop name setup#1
for (i=0; i<3; i ++ )
{
xstart[i]=1;
xend[i]=512;
ystart[i]=1;
yend[i]=256;
zstart[i]=1;
zend[i]=256;
}
/*
--------------------------------------------------------------------
c Set up info for blocking of ffts and transposes. This improves
c performance on cache-based systems. Blocking involves
c working on a chunk of the problem at a time, taking chunks
c along the first, second, or third dimension.
c
c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim)
c - In cffts23 blocking is on 1st dimension (with fft on 2nd and 3rd dims)
c Since 1st dim is always in processor, we'll assume it's long enough
c (default blocking factor is 16 so min size for 1st dim is 16)
c The only case we have to worry about is cffts1 in a 2d decomposition.
c so the blocking factor should not be larger than the 2nd dimension.
c-------------------------------------------------------------------
*/
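/* Default blocking: 16-wide pencils padded to 18; the extra padding is
   presumably there to avoid power-of-two strides in the scratch arrays. */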
fftblock=16;
fftblockpad=18;
if ((fftblock!=16))
{
fftblockpad=(fftblock+3);
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void compute_indexmap_kernel0(int * dims, size_t pitch__dims, int indexmap[256][256][512], int * xstart_i, int * ystart_i, int * zstart_i)
{
int i;
int ii;
int ii2;
int ij2;
int j;
int jj;
int k;
int kk;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
i=_gtid;
if (i<( * (((int * )(((char * )dims)+(2*pitch__dims)))+0)))
{
#pragma loop name compute_indexmap#0#0
for (j=0; j<( * (((int * )(((char * )dims)+(2*pitch__dims)))+1)); j ++ )
{
#pragma loop name compute_indexmap#0#0#0
for (k=0; k<( * (((int * )(((char * )dims)+(2*pitch__dims)))+2)); k ++ )
{
ii=((((((i+1)+( * xstart_i))-2)+(512/2))%512)-(512/2));
ii2=(ii*ii);
jj=((((((j+1)+( * ystart_i))-2)+(256/2))%256)-(256/2));
ij2=((jj*jj)+ii2);
kk=((((((k+1)+( * zstart_i))-2)+(256/2))%256)-(256/2));
indexmap[k][j][i]=((kk*kk)+ij2);
}
}
}
}
static void compute_indexmap(int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------
*/
int i;
double ap;
int xstart_i;
int ystart_i;
int zstart_i;
/*
--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n2, n) - n/2
c-------------------------------------------------------------------
*/
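/* In C (0-based, with the per-dimension start offset) this becomes,
   e.g., ii = ((i+1 + xstart_i - 2 + 512/2) % 512) - 512/2, exactly as
   written in the kernel above. */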
int * gpu__xstart_i;
int * gpu__ystart_i;
int * gpu__zstart_i;
xstart_i=xstart[2];
ystart_i=ystart[2];
zstart_i=zstart[2];
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)dims[2][0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(cudaMemcpy2D(gpu__dims, pitch__dims, dims, (3*sizeof (int)), (3*sizeof (int)), 3, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__xstart_i)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__xstart_i, ( & xstart_i), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__ystart_i)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__ystart_i, ( & ystart_i), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__zstart_i)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__zstart_i, ( & zstart_i), gpuBytes, cudaMemcpyHostToDevice));
#pragma omp parallel for shared(dims, indexmap, xstart_i, ystart_i, zstart_i) private(i, ii, ii2, ij2, j, jj, k, kk) schedule(static)
#pragma cuda gpurun nocudafree(dims, indexmap)
#pragma cuda gpurun nog2cmemtr(dims, indexmap, xstart_i, ystart_i, zstart_i)
#pragma cuda ainfo kernelid(0) procname(compute_indexmap)
#pragma cuda gpurun cudafree(xstart_i, ystart_i, zstart_i)
#pragma cuda gpurun noc2gmemtr(indexmap)
compute_indexmap_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__dims, pitch__dims, ((int (*)[256][512])gpu__indexmap__main), gpu__xstart_i, gpu__ystart_i, gpu__zstart_i);
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__zstart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__ystart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__xstart_i));
/*
--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------
*/
/* #pragma omp single */
{
ap=(((( - 4.0)*1.0E-6)*3.141592653589793)*3.141592653589793);
ex[0]=1.0;
ex[1]=exp(ap);
#pragma loop name compute_indexmap#1
for (i=2; i<=(20*((((512*512)/4)+((256*256)/4))+((256*256)/4))); i ++ )
{
ex[i]=(ex[(i-1)]*ex[1]);
}
}
/* end single */
return ;
}
__global__ void compute_indexmap_clnd1_kernel0(int * dims, size_t pitch__dims, int indexmap[256][256][512], int * xstart_i, int * ystart_i, int * zstart_i)
{
int i;
int ii;
int ii2;
int ij2;
int j;
int jj;
int k;
int kk;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
i=_gtid;
if (i<( * (((int * )(((char * )dims)+(2*pitch__dims)))+0)))
{
#pragma loop name compute_indexmap#0#0
for (j=0; j<( * (((int * )(((char * )dims)+(2*pitch__dims)))+1)); j ++ )
{
#pragma loop name compute_indexmap#0#0#0
for (k=0; k<( * (((int * )(((char * )dims)+(2*pitch__dims)))+2)); k ++ )
{
ii=((((((i+1)+( * xstart_i))-2)+(512/2))%512)-(512/2));
ii2=(ii*ii);
jj=((((((j+1)+( * ystart_i))-2)+(256/2))%256)-(256/2));
ij2=((jj*jj)+ii2);
kk=((((((k+1)+( * zstart_i))-2)+(256/2))%256)-(256/2));
indexmap[k][j][i]=((kk*kk)+ij2);
}
}
}
}
static void compute_indexmap_clnd1(int indexmap[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2
c for time evolution exponent.
c-------------------------------------------------------------------
*/
int i;
double ap;
int xstart_i;
int ystart_i;
int zstart_i;
/*
--------------------------------------------------------------------
c basically we want to convert the fortran indices
c 1 2 3 4 5 6 7 8
c to
c 0 1 2 3 -4 -3 -2 -1
c The following magic formula does the trick:
c mod(i-1+n2, n) - n/2
c-------------------------------------------------------------------
*/
int * gpu__xstart_i;
int * gpu__ystart_i;
int * gpu__zstart_i;
xstart_i=xstart[2];
ystart_i=ystart[2];
zstart_i=zstart[2];
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__xstart_i)), gpuBytes));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)dims[2][0])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
CUDA_SAFE_CALL(cudaMemcpy(gpu__xstart_i, ( & xstart_i), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__ystart_i)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__ystart_i, ( & ystart_i), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__zstart_i)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__zstart_i, ( & zstart_i), gpuBytes, cudaMemcpyHostToDevice));
#pragma omp parallel for shared(dims, indexmap, xstart_i, ystart_i, zstart_i) private(i, ii, ii2, ij2, j, jj, k, kk) schedule(static)
#pragma cuda gpurun noc2gmemtr(dims, indexmap)
#pragma cuda gpurun nocudamalloc(dims, indexmap)
#pragma cuda gpurun nocudafree(dims, indexmap)
#pragma cuda gpurun nog2cmemtr(dims, indexmap, xstart_i, ystart_i, zstart_i)
#pragma cuda ainfo kernelid(0) procname(compute_indexmap_clnd1)
#pragma cuda gpurun cudafree(xstart_i, ystart_i, zstart_i)
compute_indexmap_clnd1_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__dims, pitch__dims, ((int (*)[256][512])gpu__indexmap__main), gpu__xstart_i, gpu__ystart_i, gpu__zstart_i);
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__zstart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__ystart_i));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__xstart_i));
/*
--------------------------------------------------------------------
c compute array of exponentials for time evolution.
c-------------------------------------------------------------------
*/
/* #pragma omp single */
{
ap=(((( - 4.0)*1.0E-6)*3.141592653589793)*3.141592653589793);
ex[0]=1.0;
ex[1]=exp(ap);
#pragma loop name compute_indexmap#1
for (i=2; i<=(20*((((512*512)/4)+((256*256)/4))+((256*256)/4))); i ++ )
{
ex[i]=(ex[(i-1)]*ex[1]);
}
}
/* end single */
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void print_timers(void )
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
char * tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " } ;
#pragma loop name print_timers#0
for (i=0; i<7; i ++ )
{
if ((timer_read(i)!=0.0))
{
printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i));
}
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void fft(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xinxout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
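/* Forward transform (dir == 1): 1-D FFTs along dimensions 1, 2, 3 in that
   order, with the final pass writing into x2. Inverse transform: the same
   passes in reverse order (3, 2, 1). The cffts*_clnd* variants below are
   clones generated by the translator for particular callers, differing
   only in which persistent device buffers they are wired to. */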
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd1(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd1(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd1(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
static void fft_clnd2_cloned0(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xinxout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1_clnd2_cloned0(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd2_cloned0(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3_clnd2_cloned0(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd3_cloned0(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd3_cloned0(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd3_cloned0(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
static void fft_clnd1(int dir, double x1_real[256][256][512], double x1_imag[256][256][512], double x2_real[256][256][512], double x2_imag[256][256][512])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y0[NX][FFTBLOCKPAD]; */
/* dcomplex y1[NX][FFTBLOCKPAD]; */
/*
--------------------------------------------------------------------
c note: args x1, x2 must be different arrays
c note: args for cfftsx are (direction, layout, xin, xout, scratch)
c xinxout may be the same and it can be somewhat faster
c if they are
c-------------------------------------------------------------------
*/
if ((dir==1))
{
/* cffts1(1, dims[0], x1, x1, y0, y1); x1 -> x1 */
cffts1_clnd4(1, dims[0], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd4(1, dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts3(1, dims[2], x1, x2, y0, y1); x1 -> x2 */
cffts3_clnd4(1, dims[2], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
}
else
{
/* cffts3(-1, dims[2], x1, x1, y0, y1); x1 -> x1 */
cffts3_clnd5(( - 1), dims[2], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts2(-1, dims[1], x1, x1, y0, y1); x1 -> x1 */
cffts2_clnd5(( - 1), dims[1], x1_real, x1_imag, x1_real, x1_imag, NULL, NULL, NULL, NULL);
/* x1 -> x1 */
/* cffts1(-1, dims[0], x1, x2, y0, y1); x1 -> x2 */
cffts1_clnd5(( - 1), dims[0], x1_real, x1_imag, x2_real, x2_imag, NULL, NULL, NULL, NULL);
/* x1 -> x2 */
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
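/* cffts1 kernel: one thread per k plane. Each thread copies fftblock-wide
   pencils of x into its private yy0 scratch slab, runs the 1-D FFT
   (dev_cfftz) in place, and copies the result back to xout. */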
__global__ void cffts1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMemcpy(gpu__fftblock, ( & fftblock), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMemcpy(gpu__fftblockpad, ( & fftblockpad), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_imag, u_imag, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_real, u_real, gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
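/* yy0/yy1 are the translator's expansion of the threadprivate scratch
   arrays: one 512x18 double slab per GPU thread, allocated in global
   memory and indexed by _gtid inside the kernel. */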
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts1_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd5)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_clnd5_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts1_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_imag, u_imag, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_real, u_real, gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd4)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_clnd4_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts1_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun multisrccg(xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_clnd3_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts1_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_clnd2_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
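/*
--------------------------------------------------------------------
c cffts1_clnd1: same per-k-plane blocking scheme as the other cffts1
c clones; this instance reads from the u1 device buffers and writes
c its output into the u0 device buffers (see the kernel launch in the
c host wrapper below).
c-------------------------------------------------------------------
*/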
__global__ void cffts1_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_0, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int j;
int jj;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[0];
#pragma loop name cffts1#1#0
for (jj=0; jj<=(d[1]-fftblock_0); jj+=fftblock_0)
{
#pragma loop name cffts1#1#0#0
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#0#0
for (i=0; i<d_0; i ++ )
{
yy0_real[_gtid][i][j]=x_real[k][(j+jj)][i];
yy0_imag[_gtid][i][j]=x_imag[k][(j+jj)][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_0), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts1#1#0#1
for (j=0; j<fftblock_0; j ++ )
{
#pragma loop name cffts1#1#0#1#0
for (i=0; i<d_0; i ++ )
{
xout_real[k][(j+jj)][i]=yy0_real[_gtid][i][j];
xout_imag[k][(j+jj)][i]=yy0_imag[_gtid][i][j];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts1_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_0;
int * gpu__d;
int * gpu__is;
int * gpu__logd_0;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts1#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_0=logd[0];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_0)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_0, ( & logd_0), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, j, jj, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_0, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts1_clnd1)
#pragma cuda gpurun registerRO(d[0], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_0)
cffts1_clnd1_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_0, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_0));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
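/*
c cffts2 performs the FFT along the second (j) dimension. Each GPU
c thread again owns one k-plane: it copies fftblock-wide i-slices of
c x into yy0, runs dev_cfftz over logd[1] stages, and writes the
c transformed block back to xout. In this instance both x and xout
c are the u1 device buffers, so the transform is applied in place.
*/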
__global__ void cffts2_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
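/*
--------------------------------------------------------------------
c cffts2_clnd5: cloned instance of cffts2 generated for another call
c site. The kernel body matches cffts2_kernel0; the host wrapper
c again launches it in place on the u1 device buffers.
c-------------------------------------------------------------------
*/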
__global__ void cffts2_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd5)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_clnd5_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
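/*
--------------------------------------------------------------------
c cffts2_clnd4: further clone of cffts2 with the same gather /
c dev_cfftz / scatter pattern, launched in place on the u1 buffers.
c-------------------------------------------------------------------
*/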
__global__ void cffts2_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd4)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_clnd4_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
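/*
--------------------------------------------------------------------
c cffts2_clnd3_cloned0: clone of cffts2 for another call site;
c identical kernel body, in-place launch on the u1 buffers.
c-------------------------------------------------------------------
*/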
__global__ void cffts2_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_clnd3_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
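/*
--------------------------------------------------------------------
c cffts2_clnd2_cloned0: clone of cffts2; identical kernel body and
c host wrapper structure, launched in place on the u1 buffers.
c-------------------------------------------------------------------
*/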
__global__ void cffts2_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_clnd2_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
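/*
--------------------------------------------------------------------
c cffts2_clnd1: another clone of cffts2; same kernel body, in-place
c launch on the u1 buffers.
c-------------------------------------------------------------------
*/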
__global__ void cffts2_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_1, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
k=_gtid;
fftblock_0=( * fftblock);
if (k<d[2])
{
d_0=d[1];
#pragma loop name cffts2#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#0
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][j][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][j][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_1), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts2#1#0#1
for (j=0; j<d_0; j ++ )
{
#pragma loop name cffts2#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][j][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][j][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts2_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_1;
int * gpu__d;
int * gpu__is;
int * gpu__logd_1;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts2#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_1=logd[1];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_1)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_1, ( & logd_1), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[2])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_1, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts2_clnd1)
#pragma cuda gpurun registerRO(d[1], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_1)
cffts2_clnd1_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_1, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_1));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
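/*
c cffts3 performs the FFT along the third (k) dimension. Here each
c GPU thread owns one j-line: for every k it gathers fftblock-wide
c i-slices into yy0, runs dev_cfftz over logd[2] stages, and scatters
c the result into xout (in this instance the u0 device buffers, read
c from u1). The grid is therefore sized from d[1] rather than d[2].
c
c Illustrative (hypothetical) host-side call, assuming dims[] holds
c the grid sizes and the usual FT scratch arrays are in scope:
c
c   cffts3(1, dims, u1_real, u1_imag, u0_real, u0_imag,
c          y0_real, y0_imag, y1_real, y1_imag);
c
c i.e. a forward z-direction transform of u1 written into u0.
*/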
__global__ void cffts3_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
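/*
--------------------------------------------------------------------
c cffts3_clnd5: cloned instance of cffts3, launched in place on the
c u1 device buffers. This host wrapper also re-copies 512 elements of
c u_real/u_imag to the device before the launch.
c-------------------------------------------------------------------
*/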
__global__ void cffts3_clnd5_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd5(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_imag, u_imag, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_real, u_real, gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd5)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_clnd5_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts3_clnd4_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd4(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd4)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_clnd4_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u0_imag__main), ((double (*)[256][512])gpu__u0_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts3_clnd3_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd3_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd3_cloned0)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_clnd3_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts3_clnd2_cloned0_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd2_cloned0(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun multisrccg(xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd2_cloned0)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_clnd2_cloned0_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
__global__ void cffts3_clnd1_kernel0(int * d, int * fftblock, int * fftblockpad, int * is, int * logd_2, double * u_imag, double * u_real, double x_imag[256][256][512], double x_real[256][256][512], double xout_imag[256][256][512], double xout_real[256][256][512], double yy0_imag[][512][18], double yy0_real[][512][18], double yy1_imag[][512][18], double yy1_real[][512][18])
{
int d_0;
int fftblock_0;
int i;
int ii;
int j;
int k;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
j=_gtid;
fftblock_0=( * fftblock);
if (j<d[1])
{
d_0=d[2];
#pragma loop name cffts3#1#0
for (ii=0; ii<=(d[0]-fftblock_0); ii+=fftblock_0)
{
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#0
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#0#0
for (i=0; i<fftblock_0; i ++ )
{
yy0_real[_gtid][k][i]=x_real[k][j][(i+ii)];
yy0_imag[_gtid][k][i]=x_imag[k][j][(i+ii)];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */
dev_cfftz(( * is), ( * logd_2), d_0, yy0_real, yy0_imag, yy1_real, yy1_imag, fftblock, fftblockpad, u_imag, u_real, _gtid);
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */
/* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */
#pragma loop name cffts3#1#0#1
for (k=0; k<d_0; k ++ )
{
#pragma loop name cffts3#1#0#1#0
for (i=0; i<fftblock_0; i ++ )
{
xout_real[k][j][(i+ii)]=yy0_real[_gtid][k][i];
xout_imag[k][j][(i+ii)]=yy0_imag[_gtid][k][i];
}
}
/* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */
}
}
}
static void cffts3_clnd1(int is, int d[3], double x_real[256][256][512], double x_imag[256][256][512], double xout_real[256][256][512], double xout_imag[256][256][512], double y0_real[512][18], double y0_imag[512][18], double y1_real[512][18], double y1_imag[512][18])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int logd[3];
int i;
int logd_2;
int * gpu__d;
int * gpu__is;
int * gpu__logd_2;
double * gpu__yy0_imag;
double * gpu__yy0_real;
double * gpu__yy1_imag;
double * gpu__yy1_real;
#pragma loop name cffts3#0
for (i=0; i<3; i ++ )
{
logd[i]=ilog2(d[i]);
}
logd_2=logd[2];
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__d)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__d, d, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMemcpy(gpu__fftblock, ( & fftblock), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMemcpy(gpu__fftblockpad, ( & fftblockpad), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__is)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__is, ( & is), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__logd_2)), gpuBytes));
CUDA_SAFE_CALL(cudaMemcpy(gpu__logd_2, ( & logd_2), gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_imag, u_imag, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(512*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(gpu__u_real, u_real, gpuBytes, cudaMemcpyHostToDevice));
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=((int)ceil((((float)d[1])/1024.0F)));
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=((int)ceil((((float)gpuNumBlocks)/10000.0F)));
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy0_real)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_imag)), gpuBytes));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & gpu__yy1_real)), gpuBytes));
#pragma omp parallel for threadprivate(yy0_imag, yy0_real, yy1_imag, yy1_real) shared(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real) private(i, ii, j, k) schedule(static)
#pragma cuda gpurun noc2gmemtr(x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudafree(fftblock, fftblockpad, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda gpurun nog2cmemtr(d, fftblock, fftblockpad, is, logd_2, u_imag, u_real, x_imag, x_real, xout_imag, xout_real)
#pragma cuda ainfo kernelid(0) procname(cffts3_clnd1)
#pragma cuda gpurun registerRO(d[2], fftblock)
#pragma cuda gpurun cudafree(d, is, logd_2)
cffts3_clnd1_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(gpu__d, gpu__fftblock, gpu__fftblockpad, gpu__is, gpu__logd_2, gpu__u_imag, gpu__u_real, ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[256][512])gpu__u1_imag__main), ((double (*)[256][512])gpu__u1_real__main), ((double (*)[512][18])gpu__yy0_imag), ((double (*)[512][18])gpu__yy0_real), ((double (*)[512][18])gpu__yy1_imag), ((double (*)[512][18])gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_real, gpu__yy1_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy1_imag, gpu__yy1_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy1_imag));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_real, gpu__yy0_real, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_real));
gpuBytes=((512*18)*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(yy0_imag, gpu__yy0_imag, gpuBytes, cudaMemcpyDeviceToHost));
gpuBytes=(totalNumThreads*((512*18)*sizeof (double)));
CUDA_SAFE_CALL(cudaFree(gpu__yy0_imag));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__logd_2));
gpuBytes=sizeof (int);
CUDA_SAFE_CALL(cudaFree(gpu__is));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaFree(gpu__d));
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void fft_init(int n)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c compute the roots-of-unity array that will be used for subsequent FFTs.
c-------------------------------------------------------------------
*/
int m;
int nu;
int ku;
int i;
int j;
int ln;
double t;
double ti;
/*
--------------------------------------------------------------------
c Initialize the U array with sines and cosines in a manner that permits
c stride one access at each FFT iteration.
c-------------------------------------------------------------------
*/
nu=n;
m=ilog2(n);
u_real[0]=((double)m);
u_imag[0]=0.0;
ku=1;
ln=1;
#pragma loop name fft_init#0
for (j=1; j<=m; j ++ )
{
t=(3.141592653589793/ln);
#pragma loop name fft_init#0#0
for (i=0; i<=(ln-1); i ++ )
{
ti=(i*t);
u_real[(i+ku)]=cos(ti);
u_imag[(i+ku)]=sin(ti);
}
ku=(ku+ln);
ln=(2*ln);
}
return ;
}
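/*
 * Illustrative trace of the u[] table built above, for n = 8 (m = 3):
 *
 * u_real[0] = 3.0 (stores m), u_imag[0] = 0.0
 * j = 1: ln = 1, ku = 1 -> u[1] = cos(0) + i*sin(0)
 * j = 2: ln = 2, ku = 2 -> u[2..3] = cos(i'*pi/2) + i*sin(i'*pi/2), i' = 0,1
 * j = 3: ln = 4, ku = 4 -> u[4..7] = cos(i'*pi/4) + i*sin(i'*pi/4), i' = 0..3
 *
 * Level j starts at offset 2^(j-1) and holds 2^(j-1) twiddle factors, which
 * is the stride-one layout that dev_fftz2 reads through u[ku + i].
 */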
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__device__ static void dev_cfftz(int is, int m, int n, double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int * fftblock, int * fftblockpad, double u_imag[512], double u_real[512], int _gtid)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c to Swarztrauber. X is both the input and the output array, while Y is a
c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to
c perform FFTs, the array U must be initialized by calling CFFTZ with IS
c set to 0 and M set to MX, where MX is the maximum value of M for any
c subsequent call.
c-------------------------------------------------------------------
*/
int i;
int j;
int l;
int mx;
/*
--------------------------------------------------------------------
c Check if input parameters are invalid.
c-------------------------------------------------------------------
*/
int fftblock_0;
fftblock_0=( * fftblock);
mx=((int)u_real[0]);
/*
--------------------------------------------------------------------
c Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------
*/
#pragma loop name cfftz#0
for (l=1; l<=m; l+=2)
{
dev_fftz2(is, l, m, n, fftblock_0, ( * fftblockpad), u_real, u_imag, x_real, x_imag, y_real, y_imag, _gtid);
if ((l==m))
{
break;
}
dev_fftz2(is, (l+1), m, n, fftblock_0, ( * fftblockpad), u_real, u_imag, y_real, y_imag, x_real, x_imag, _gtid);
}
/*
--------------------------------------------------------------------
c Copy Y to X.
c-------------------------------------------------------------------
*/
if (((m%2)==1))
{
#pragma loop name cfftz#1
for (j=0; j<n; j ++ )
{
#pragma loop name cfftz#1#0
for (i=0; i<fftblock_0; i ++ )
{
x_real[_gtid][j][i]=y_real[_gtid][j][i];
x_imag[_gtid][j][i]=y_imag[_gtid][j][i];
}
}
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__device__ static void dev_fftz2(int is, int l, int m, int n, int ny, int ny1, double u_real[512], double u_imag[512], double x_real[][512][18], double x_imag[][512][18], double y_real[][512][18], double y_imag[][512][18], int _gtid)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Performs the L-th iteration of the second variant of the Stockham FFT.
c-------------------------------------------------------------------
*/
int k;
int n1;
int li;
int lj;
int lk;
int ku;
int i;
int j;
int i11;
int i12;
int i21;
int i22;
double u1_real;
double u1_imag;
/*
--------------------------------------------------------------------
c Set initial parameters.
c-------------------------------------------------------------------
*/
n1=(n/2);
if (((l-1)==0))
{
lk=1;
}
else
{
lk=(2<<((l-1)-1));
}
if (((m-l)==0))
{
li=1;
}
else
{
li=(2<<((m-l)-1));
}
lj=(2*lk);
ku=li;
#pragma loop name fftz2#0
for (i=0; i<li; i ++ )
{
i11=(i*lk);
i12=(i11+n1);
i21=(i*lj);
i22=(i21+lk);
if ((is>=1))
{
u1_real=u_real[(ku+i)];
u1_imag=u_imag[(ku+i)];
}
else
{
u1_real=u_real[(ku+i)];
u1_imag=( - u_imag[(ku+i)]);
}
/*
--------------------------------------------------------------------
c This loop is vectorizable.
c-------------------------------------------------------------------
*/
#pragma loop name fftz2#0#0
for (k=0; k<lk; k ++ )
{
#pragma loop name fftz2#0#0#0
for (j=0; j<ny; j ++ )
{
double x11real;
double x11imag;
double x21real;
double x21imag;
x11real=x_real[_gtid][(i11+k)][j];
x11imag=x_imag[_gtid][(i11+k)][j];
x21real=x_real[_gtid][(i12+k)][j];
x21imag=x_imag[_gtid][(i12+k)][j];
y_real[_gtid][(i21+k)][j]=(x11real+x21real);
y_imag[_gtid][(i21+k)][j]=(x11imag+x21imag);
y_real[_gtid][(i22+k)][j]=((u1_real*(x11real-x21real))-(u1_imag*(x11imag-x21imag)));
y_imag[_gtid][(i22+k)][j]=((u1_real*(x11imag-x21imag))+(u1_imag*(x11real-x21real)));
}
}
}
return ;
}
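/*
 * The k/j loops above compute the Stockham butterfly; in complex shorthand,
 * with x1 = x[i11+k], x2 = x[i12+k] and twiddle factor u1:
 *
 * y[i21+k] = x1 + x2
 * y[i22+k] = u1 * (x1 - x2)
 *
 * Conjugating u1 when is < 1 selects the inverse transform.
 */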
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static int ilog2(int n)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int nn;
int lg;
int _ret_val_0;
if ((n==1))
{
_ret_val_0=0;
return _ret_val_0;
}
lg=1;
nn=2;
while (nn<n)
{
nn=(nn<<1);
lg ++ ;
}
return lg;
}
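/*
 * Behaviour note: ilog2(1) == 0; otherwise the loop counts doublings until
 * nn >= n, e.g. ilog2(256) == 8 and ilog2(512) == 9. For the power-of-two
 * grid sizes used here this is exactly log2(n); a non-power-of-two argument
 * would be rounded up.
 */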
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
__global__ void checksum_kernel0(double * red__chk_imag, double * red__chk_real, double u1_imag[256][256][512], double u1_real[256][256][512], int * xend, int * xstart, int * yend, int * ystart, int * zend, int * zstart)
{
__shared__ double sh__chk_imag[BLOCK_SIZE];
__shared__ double sh__chk_real[BLOCK_SIZE];
int xstart_0;
int ystart_0;
int zstart_0;
int j;
int q;
int r;
int s;
int _bid = (blockIdx.x+(blockIdx.y*gridDim.x));
int _gtid = (threadIdx.x+(_bid*blockDim.x));
zstart_0=zstart[0];
ystart_0=ystart[0];
xstart_0=xstart[0];
sh__chk_real[threadIdx.x]=0.0F;
sh__chk_imag[threadIdx.x]=0.0F;
j=(_gtid+1);
#pragma omp for nowait
if (j<=1024)
{
q=((j%512)+1);
if (((q>=xstart_0)&&(q<=xend[0])))
{
r=(((3*j)%256)+1);
if (((r>=ystart_0)&&(r<=yend[0])))
{
s=(((5*j)%256)+1);
if (((s>=zstart_0)&&(s<=zend[0])))
{
/* cadd is a macro in npb-C.h that adds the real and imaginary */
/* components, so the preprocessed statements below still follow the */
/* reduction pattern: */
/* cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); */
sh__chk_real[threadIdx.x]=(sh__chk_real[threadIdx.x]+u1_real[(s-zstart_0)][(r-ystart_0)][(q-xstart_0)]);
sh__chk_imag[threadIdx.x]=(sh__chk_imag[threadIdx.x]+u1_imag[(s-zstart_0)][(r-ystart_0)][(q-xstart_0)]);
}
}
}
}
__syncthreads();
if ((threadIdx.x<256))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+256)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+256)];
}
__syncthreads();
if ((threadIdx.x<128))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+128)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+128)];
}
__syncthreads();
if ((threadIdx.x<64))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+64)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+64)];
}
__syncthreads();
if ((threadIdx.x<32))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+32)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+32)];
}
if ((threadIdx.x<16))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+16)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+16)];
}
if ((threadIdx.x<8))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+8)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+8)];
}
if ((threadIdx.x<4))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+4)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+4)];
}
if ((threadIdx.x<2))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+2)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+2)];
}
if ((threadIdx.x<1))
{
sh__chk_imag[threadIdx.x]+=sh__chk_imag[(threadIdx.x+1)];
sh__chk_real[threadIdx.x]+=sh__chk_real[(threadIdx.x+1)];
}
if ((threadIdx.x==0))
{
red__chk_imag[_bid]=sh__chk_imag[0];
red__chk_real[_bid]=sh__chk_real[0];
}
}
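/*
 * Note on the reduction above: each block accumulates partial sums in shared
 * memory and writes one partial checksum per block into red__chk_real and
 * red__chk_imag; the host finishes the sum after copying them back. The tree
 * reduction assumes blockDim.x == BLOCK_SIZE == 512 and, once fewer than 32
 * threads remain, omits __syncthreads() in the classic warp-synchronous
 * style, relying on implicit lockstep execution within a warp.
 */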
static void checksum(int i, double u1_real[256][256][512], double u1_imag[256][256][512], int d[3])
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
double _chk_real;
double _chk_imag;
double * red__chk_imag;
double * chk_imag__extended;
int _ti_100_0;
double * red__chk_real;
double * chk_real__extended;
_chk_real=0.0;
_chk_imag=0.0;
{
double chk_real = _chk_real;
double chk_imag = _chk_imag;
/* #pragma omp for nowait */
dim3 dimBlock0(gpuNumThreads, 1, 1);
gpuNumBlocks=1;
if ((gpuNumBlocks>MAX_GDIMENSION))
{
gpuNumBlocks2=1;
gpuNumBlocks1=MAX_NDIMENSION;
}
else
{
gpuNumBlocks2=1;
gpuNumBlocks1=gpuNumBlocks;
}
dim3 dimGrid0(gpuNumBlocks1, gpuNumBlocks2, 1);
gpuNumBlocks=(gpuNumBlocks1*gpuNumBlocks2);
totalNumThreads=(gpuNumBlocks*gpuNumThreads);
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & red__chk_imag)), gpuBytes));
chk_imag__extended=((double * )malloc(gpuBytes));
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(cudaMalloc(((void * * )( & red__chk_real)), gpuBytes));
chk_real__extended=((double * )malloc(gpuBytes));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__xend, xend, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__xstart, xstart, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__yend, yend, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__ystart, ystart, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__zend, zend, gpuBytes, cudaMemcpyHostToDevice));
gpuBytes=(3*sizeof (int));
CUDA_SAFE_CALL(cudaMemcpy(gpu__zstart, zstart, gpuBytes, cudaMemcpyHostToDevice));
#pragma omp parallel shared(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart) private(j, q, r, s) reduction(+: chk_imag, chk_real) schedule(static)
#pragma cuda gpurun noc2gmemtr(u1_imag, u1_real, x_imag, x_real, xout_imag, xout_real, yy0_imag, yy0_real)
#pragma cuda gpurun nocudamalloc(u1_imag, u1_real)
#pragma cuda gpurun nocudafree(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart)
#pragma cuda gpurun multisrccg(xend, xstart, yend, ystart, zend, zstart)
#pragma cuda gpurun nog2cmemtr(u1_imag, u1_real, xend, xstart, yend, ystart, zend, zstart)
#pragma cuda ainfo kernelid(0) procname(checksum)
#pragma cuda gpurun registerRO(xstart[0], ystart[0], zstart[0])
checksum_kernel0<<<dimGrid0, dimBlock0, 0, 0>>>(red__chk_imag, red__chk_real, ((double (*)[256][512])gpu__u2_imag__main), ((double (*)[256][512])gpu__u2_real__main), gpu__xend, gpu__xstart, gpu__yend, gpu__ystart, gpu__zend, gpu__zstart);
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(chk_real__extended, red__chk_real, gpuBytes, cudaMemcpyDeviceToHost));
for (_ti_100_0=0; _ti_100_0<gpuNumBlocks; _ti_100_0 ++ )
{
chk_real+=chk_real__extended[_ti_100_0];
}
free(chk_real__extended);
CUDA_SAFE_CALL(cudaFree(red__chk_real));
gpuBytes=(gpuNumBlocks*sizeof (double));
CUDA_SAFE_CALL(cudaMemcpy(chk_imag__extended, red__chk_imag, gpuBytes, cudaMemcpyDeviceToHost));
for (_ti_100_0=0; _ti_100_0<gpuNumBlocks; _ti_100_0 ++ )
{
chk_imag+=chk_imag__extended[_ti_100_0];
}
free(chk_imag__extended);
CUDA_SAFE_CALL(cudaFree(red__chk_imag));
_chk_real=chk_real;
_chk_imag=chk_imag;
}
/* #pragma omp critical */
{
sums_real[i]+=_chk_real;
sums_imag[i]+=_chk_imag;
}
/* #pragma omp barrier */
/* #pragma omp single */
{
/* complex % real */
sums_real[i]=(sums_real[i]/((double)33554432));
sums_imag[i]=(sums_imag[i]/((double)33554432));
printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums_real[i], sums_imag[i]);
}
return ;
}
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
static void verify(int d1, int d2, int d3, int nt, int * verified, char * cclass)
{
/*
--------------------------------------------------------------------
c-------------------------------------------------------------------
*/
int i;
double err;
double epsilon;
/*
--------------------------------------------------------------------
c Sample size reference checksums
c-------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
c Class S size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_s[(6+1)] = { 0.0, 554.6087004964, 554.6385409189, 554.6148406171, 554.5423607415, 554.4255039624, 554.2683411902 } ;
double vdata_imag_s[(6+1)] = { 0.0, 484.5363331978, 486.5304269511, 488.3910722336, 490.1273169046, 491.7475857993, 493.2597244941 } ;
/*
--------------------------------------------------------------------
c Class W size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_w[(6+1)] = { 0.0, 567.3612178944, 563.1436885271, 559.402408997, 556.069804702, 553.089899125, 550.4159734538 } ;
double vdata_imag_w[(6+1)] = { 0.0, 529.3246849175, 528.2149986629, 527.0996558037, 526.0027904925, 524.9400845633, 523.9212247086 } ;
/*
--------------------------------------------------------------------
c Class A size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_a[(6+1)] = { 0.0, 504.6735008193, 505.9412319734, 506.9376896287, 507.7892868474, 508.5233095391, 509.1487099959 } ;
double vdata_imag_a[(6+1)] = { 0.0, 511.404790551, 509.8809666433, 509.8144042213, 510.1336130759, 510.4914655194, 510.7917842803 } ;
/*
--------------------------------------------------------------------
c Class B size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_b[(20+1)] = { 0.0, 517.7643571579, 515.4521291263, 514.6409228649, 514.2378756213, 513.9626667737, 513.7423460082, 513.5547056878, 513.3910925466, 513.247070539, 513.1197729984, 513.0070319283, 512.9070537032, 512.8182883502, 512.7393733383, 512.669106202, 512.6064276004, 512.550407657, 512.500233172, 512.4551951846, 512.4146770029 } ;
double vdata_imag_b[(20+1)] = { 0.0, 507.7803458597, 508.8249431599, 509.6208912659, 510.1023387619, 510.3976610617, 510.5948019802, 510.7404165783, 510.8576573661, 510.9577278523, 511.0460304483, 511.12524338, 511.1968077718, 511.2616233064, 511.3203605551, 511.3735928093, 511.4218460548, 511.465613976, 511.5053595966, 511.5415130407, 511.5744692211 } ;
/*
--------------------------------------------------------------------
c Class C size reference checksums
c-------------------------------------------------------------------
*/
double vdata_real_c[(20+1)] = { 0.0, 519.5078707457, 515.5422171134, 514.4678022222, 514.0150594328, 513.755042681, 513.5811056728, 513.4569343165, 513.3651975661, 513.2955192805, 513.2410471738, 513.1971141679, 513.1605205716, 513.1290734194, 513.1012720314, 513.0760908195, 513.0528295923, 513.0310107773, 513.0103090133, 512.9905029333, 512.9714421109 } ;
double vdata_imag_c[(20+1)] = { 0.0, 514.9019699238, 512.7578201997, 512.2251847514, 512.1090289018, 512.1143685824, 512.1496764568, 512.1870921893, 512.2193250322, 512.2454735794, 512.2663649603, 512.2830879827, 512.2965869718, 512.3075927445, 512.3166486553, 512.3241541685, 512.3304037599, 512.3356167976, 512.3399592211, 512.3435588985, 512.3465164008 } ;
epsilon=1.0E-12;
( * verified)=1;
( * cclass)='U';
if (((((d1==64)&&(d2==64))&&(d3==64))&&(nt==6)))
{
( * cclass)='S';
#pragma loop name verify#0
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_s[i])/vdata_real_s[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_s[i])/vdata_imag_s[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==128)&&(d2==128))&&(d3==32))&&(nt==6)))
{
( * cclass)='W';
#pragma loop name verify#1
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_w[i])/vdata_real_w[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_w[i])/vdata_imag_w[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==256)&&(d2==256))&&(d3==128))&&(nt==6)))
{
( * cclass)='A';
#pragma loop name verify#2
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_a[i])/vdata_real_a[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_a[i])/vdata_imag_a[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==512)&&(d2==256))&&(d3==256))&&(nt==20)))
{
( * cclass)='B';
#pragma loop name verify#3
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_b[i])/vdata_real_b[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_b[i])/vdata_imag_b[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
else
{
if (((((d1==512)&&(d2==512))&&(d3==512))&&(nt==20)))
{
( * cclass)='C';
#pragma loop name verify#4
for (i=1; i<=nt; i ++ )
{
err=((sums_real[i]-vdata_real_c[i])/vdata_real_c[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
err=((sums_imag[i]-vdata_imag_c[i])/vdata_imag_c[i]);
if ((fabs(err)>epsilon))
{
( * verified)=0;
break;
}
}
}
}
}
}
}
if ((( * cclass)!='U'))
{
printf("Result verification successful\n");
}
else
{
printf("Result verification failed\n");
}
printf("cclass = %1c\n", ( * cclass));
return ;
}
|
2a6c71e55fc7cc6286fa5a4ef35a09222988eb65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "preprocess_kernel.cuh"
namespace CUDAKernel{
Norm Norm::mean_std(float mean[3], float std[3], float alpha){
Norm out;
out.type = NormType::MeanStd;
out.alpha = alpha;
memcpy(out.mean, mean, sizeof(out.mean));
memcpy(out.std, std, sizeof(out.std));
return out;
}
Norm Norm::alpha_beta(float alpha, float beta){
Norm out;
out.type = NormType::AlphaBeta;
out.alpha = alpha;
out.beta = beta;
return out;
}
__global__ void warp_affine_bilinear_and_normalize_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height,
uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
float m_x1 = warp_affine_matrix_2_3[0];
float m_y1 = warp_affine_matrix_2_3[1];
float m_z1 = warp_affine_matrix_2_3[2];
float m_x2 = warp_affine_matrix_2_3[3];
float m_y2 = warp_affine_matrix_2_3[4];
float m_z2 = warp_affine_matrix_2_3[5];
int dx = position % dst_width;
int dy = position / dst_width;
float src_x = (m_x1 * dx + m_y1 * dy + m_z1) + 0.5f;
float src_y = (m_x2 * dx + m_y2 * dy + m_z2) + 0.5f;
float c0, c1, c2;
if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){
// out of range
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
}else{
int y_low = floor(src_y);
int x_low = floor(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
float* pdst = dst + dy * dst_width + dx * 3;
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if(y_low >= 0){
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if(y_high < src_height){
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f;
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f;
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f;
}
int type = (unsigned int)(norm.type) & 0x000000FF;
int channel_order = (unsigned int)(norm.type) & 0x0000FF00;
if(channel_order == int(NormType::InvertChannel)){
float t = c2;
c2 = c0; c0 = t;
}
if(type == int(NormType::MeanStd)){
c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0];
c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1];
c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2];
}else if(type == int(NormType::AlphaBeta)){
c0 = c0 * norm.alpha + norm.beta;
c1 = c1 * norm.alpha + norm.beta;
c2 = c2 * norm.alpha + norm.beta;
}
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
__global__ void normalize_feature_kernel(float* feature_array, int num_feature, int feature_length, int edge){
/*
 * (logical extent / launch extent / builtin index / value)
 * 1 gz bi.z 0
 * 1 gy bi.y 0
 * N NF bi.x ~
 * 1 1 ti.z 0
 * F FL / 32 ti.y ~
 * Q 32 ti.x ~
 */
int position = (blockIdx.x * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
if (position >= edge) return;
extern __shared__ float l2_norm[];
int irow = position / feature_length;
int icol = position % feature_length;
if(icol == 0)
l2_norm[irow] = 0;
__syncthreads();
float value = feature_array[position];
atomicAdd(l2_norm + irow, value * value);
__syncthreads();
if(icol == 0)
l2_norm[irow] = sqrt(l2_norm[irow]);
__syncthreads();
feature_array[position] = value / l2_norm[irow];
}
static __device__ uint8_t cast(float value){
return value < 0 ? 0 : (value > 255 ? 255 : value);
}
static __global__ void convert_nv12_to_bgr_kernel(const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst_bgr, int edge){
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
int ox = position % width;
int oy = position / width;
const uint8_t& yvalue = y[oy * linesize + ox];
int offset_uv = (oy >> 1) * linesize + (ox & 0xFFFFFFFE);
const uint8_t& u = uv[offset_uv + 0];
const uint8_t& v = uv[offset_uv + 1];
dst_bgr[position * 3 + 0] = 1.164f * (yvalue - 16.0f) + 2.018f * (u - 128.0f);
dst_bgr[position * 3 + 1] = 1.164f * (yvalue - 16.0f) - 0.813f * (v - 128.0f) - 0.391f * (u - 128.0f);
dst_bgr[position * 3 + 2] = 1.164f * (yvalue - 16.0f) + 1.596f * (v - 128.0f);
}
/////////////////////////////////////////////////////////////////////////
void convert_nv12_to_bgr_invoke(
const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst, hipStream_t stream){
int total = width * height;
dim3 grid = CUDATools::grid_dims(total);
dim3 block = CUDATools::block_dims(total);
checkCudaKernel(convert_nv12_to_bgr_kernel << <grid, block, 0, stream >> > (
y, uv, width, height, linesize,
dst, total
));
}
void warp_affine_bilinear_and_normalize(
uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height,
float* matrix_2_3, uint8_t const_value, const Norm& norm,
hipStream_t stream) {
int jobs = dst_width * dst_height;
auto grid = CUDATools::grid_dims(jobs);
auto block = CUDATools::block_dims(jobs);
checkCudaKernel(warp_affine_bilinear_and_normalize_kernel << <grid, block, 0, stream >> > (
src, src_line_size,
src_width, src_height, dst,
dst_width, dst_height, const_value, matrix_2_3, norm, jobs
));
}
void norm_feature(
float* feature_array, int num_feature, int feature_length,
hipStream_t stream
){
Assert(feature_length % 32 == 0);
int jobs = num_feature * feature_length;
auto grid = dim3(num_feature);
auto block = dim3(feature_length / 32, 32);
checkCudaKernel(normalize_feature_kernel << <grid, block, num_feature * sizeof(float), stream >> > (
feature_array, num_feature, feature_length, jobs
));
}
// void resize_bilinear(
// uint8_t* src, int src_line_size, int src_width, int src_height,
// uint8_t* dst, int dst_line_size, int dst_width, int dst_height,
// hipStream_t stream) {
// int jobs = dst_width * dst_height;
// auto grid = CUDATools::grid_dims(jobs);
// auto block = CUDATools::block_dims(jobs);
// checkCudaKernel(resize_bilinear_kernel << <grid, block, 0, stream >> > (
// src, src_line_size, src_width, src_height,
// dst, dst_line_size, dst_width, dst_height, src_width / (float)dst_width, src_height / (float)dst_height, jobs
// ));
// }
};
| 2a6c71e55fc7cc6286fa5a4ef35a09222988eb65.cu |
#include "preprocess_kernel.cuh"
namespace CUDAKernel{
Norm Norm::mean_std(float mean[3], float std[3], float alpha){
Norm out;
out.type = NormType::MeanStd;
out.alpha = alpha;
memcpy(out.mean, mean, sizeof(out.mean));
memcpy(out.std, std, sizeof(out.std));
return out;
}
Norm Norm::alpha_beta(float alpha, float beta){
Norm out;
out.type = NormType::AlphaBeta;
out.alpha = alpha;
out.beta = beta;
return out;
}
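// Usage sketch (illustrative values, not taken from this file): Norm only
// packages the normalization parameters; the per-pixel arithmetic happens
// inside warp_affine_bilinear_and_normalize_kernel below.
//
// float mean[3] = {0.485f, 0.456f, 0.406f};
// float std[3] = {0.229f, 0.224f, 0.225f};
// Norm n1 = Norm::mean_std(mean, std, 1 / 255.0f); // (x/255 - mean) / std
// Norm n2 = Norm::alpha_beta(1 / 255.0f, 0.0f); // x / 255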
__global__ void warp_affine_bilinear_and_normalize_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height,
uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
float m_x1 = warp_affine_matrix_2_3[0];
float m_y1 = warp_affine_matrix_2_3[1];
float m_z1 = warp_affine_matrix_2_3[2];
float m_x2 = warp_affine_matrix_2_3[3];
float m_y2 = warp_affine_matrix_2_3[4];
float m_z2 = warp_affine_matrix_2_3[5];
int dx = position % dst_width;
int dy = position / dst_width;
float src_x = (m_x1 * dx + m_y1 * dy + m_z1) + 0.5f;
float src_y = (m_x2 * dx + m_y2 * dy + m_z2) + 0.5f;
float c0, c1, c2;
if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){
// out of range
c0 = const_value_st;
c1 = const_value_st;
c2 = const_value_st;
}else{
int y_low = floor(src_y);
int x_low = floor(src_x);
int y_high = y_low + 1;
int x_high = x_low + 1;
uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
float ly = src_y - y_low;
float lx = src_x - x_low;
float hy = 1 - ly;
float hx = 1 - lx;
float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
float* pdst = dst + dy * dst_width + dx * 3;
uint8_t* v1 = const_value;
uint8_t* v2 = const_value;
uint8_t* v3 = const_value;
uint8_t* v4 = const_value;
if(y_low >= 0){
if (x_low >= 0)
v1 = src + y_low * src_line_size + x_low * 3;
if (x_high < src_width)
v2 = src + y_low * src_line_size + x_high * 3;
}
if(y_high < src_height){
if (x_low >= 0)
v3 = src + y_high * src_line_size + x_low * 3;
if (x_high < src_width)
v4 = src + y_high * src_line_size + x_high * 3;
}
c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f;
c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f;
c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f;
}
int type = (unsigned int)(norm.type) & 0x000000FF;
int channel_order = (unsigned int)(norm.type) & 0x0000FF00;
if(channel_order == int(NormType::InvertChannel)){
float t = c2;
c2 = c0; c0 = t;
}
if(type == int(NormType::MeanStd)){
c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0];
c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1];
c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2];
}else if(type == int(NormType::AlphaBeta)){
c0 = c0 * norm.alpha + norm.beta;
c1 = c1 * norm.alpha + norm.beta;
c2 = c2 * norm.alpha + norm.beta;
}
int area = dst_width * dst_height;
float* pdst_c0 = dst + dy * dst_width + dx;
float* pdst_c1 = pdst_c0 + area;
float* pdst_c2 = pdst_c1 + area;
*pdst_c0 = c0;
*pdst_c1 = c1;
*pdst_c2 = c2;
}
__global__ void normalize_feature_kernel(float* feature_array, int num_feature, int feature_length, int edge){
/*
 * (logical extent / launch extent / builtin index / value)
 * 1 gz bi.z 0
 * 1 gy bi.y 0
 * N NF bi.x ~
 * 1 1 ti.z 0
 * F FL / 32 ti.y ~
 * Q 32 ti.x ~
 */
int position = (blockIdx.x * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
if (position >= edge) return;
extern __shared__ float l2_norm[];
int irow = position / feature_length;
int icol = position % feature_length;
if(icol == 0)
l2_norm[irow] = 0;
__syncthreads();
float value = feature_array[position];
atomicAdd(l2_norm + irow, value * value);
__syncthreads();
if(icol == 0)
l2_norm[irow] = sqrt(l2_norm[irow]);
__syncthreads();
feature_array[position] = value / l2_norm[irow];
}
static __device__ uint8_t cast(float value){
return value < 0 ? 0 : (value > 255 ? 255 : value);
}
static __global__ void convert_nv12_to_bgr_kernel(const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst_bgr, int edge){
int position = blockDim.x * blockIdx.x + threadIdx.x;
if (position >= edge) return;
int ox = position % width;
int oy = position / width;
const uint8_t& yvalue = y[oy * linesize + ox];
int offset_uv = (oy >> 1) * linesize + (ox & 0xFFFFFFFE);
const uint8_t& u = uv[offset_uv + 0];
const uint8_t& v = uv[offset_uv + 1];
dst_bgr[position * 3 + 0] = 1.164f * (yvalue - 16.0f) + 2.018f * (u - 128.0f);
dst_bgr[position * 3 + 1] = 1.164f * (yvalue - 16.0f) - 0.813f * (v - 128.0f) - 0.391f * (u - 128.0f);
dst_bgr[position * 3 + 2] = 1.164f * (yvalue - 16.0f) + 1.596f * (v - 128.0f);
}
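// The coefficients above (offsets 16/128, gains 1.164, 1.596, 0.813, 0.391,
// 2.018) correspond to the usual BT.601 limited-range YUV-to-BGR conversion.
// The results are stored to uint8_t without the cast() clamp defined above,
// so out-of-range intermediate values are not saturated.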
/////////////////////////////////////////////////////////////////////////
void convert_nv12_to_bgr_invoke(
const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst, cudaStream_t stream){
int total = width * height;
dim3 grid = CUDATools::grid_dims(total);
dim3 block = CUDATools::block_dims(total);
checkCudaKernel(convert_nv12_to_bgr_kernel<<<grid, block, 0, stream>>>(
y, uv, width, height, linesize,
dst, total
));
}
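// --- Illustrative usage sketch (added example, not part of the original file) ---
// Shows one way convert_nv12_to_bgr_invoke() above might be driven. It assumes the usual
// NV12 layout: a full-resolution Y plane followed by an interleaved UV plane at half the
// vertical resolution, both sharing the same line size; buffer names here are made up.
// Note that the kernel above stores the BT.601 result to uint8_t without going through
// the cast() clamp helper defined earlier.
static void convert_nv12_example(const uint8_t* d_nv12, int width, int height, int linesize,
                                 uint8_t* d_bgr, cudaStream_t stream){
    const uint8_t* d_y  = d_nv12;                      // Y plane: linesize * height bytes
    const uint8_t* d_uv = d_nv12 + linesize * height;  // UV plane: linesize * height / 2 bytes
    convert_nv12_to_bgr_invoke(d_y, d_uv, width, height, linesize, d_bgr, stream);
    cudaStreamSynchronize(stream);
}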
void warp_affine_bilinear_and_normalize(
uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height,
float* matrix_2_3, uint8_t const_value, const Norm& norm,
cudaStream_t stream) {
int jobs = dst_width * dst_height;
auto grid = CUDATools::grid_dims(jobs);
auto block = CUDATools::block_dims(jobs);
checkCudaKernel(warp_affine_bilinear_and_normalize_kernel << <grid, block, 0, stream >> > (
src, src_line_size,
src_width, src_height, dst,
dst_width, dst_height, const_value, matrix_2_3, norm, jobs
));
}
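// --- Illustrative usage sketch (added example, not part of the original file) ---
// warp_affine_bilinear_and_normalize() above forwards matrix_2_3 to the kernel, whose
// visible code maps each destination pixel (dx, dy) back into source coordinates, i.e.
// matrix_2_3 is treated here as a 2x3 dst->src affine transform stored as 6 floats in a
// device-visible buffer. The Norm instance and padding value are taken as given, and all
// names in this sketch are assumptions.
static void warp_affine_identity_example(uint8_t* d_src, int src_line_size, int src_width, int src_height,
                                         float* d_dst, int dst_width, int dst_height,
                                         const Norm& norm, cudaStream_t stream){
    const float identity_2_3[6] = {1, 0, 0, 0, 1, 0};  // dst (x, y) -> src (x, y)
    float* d_matrix = nullptr;
    cudaMalloc(&d_matrix, sizeof(identity_2_3));
    cudaMemcpyAsync(d_matrix, identity_2_3, sizeof(identity_2_3), cudaMemcpyHostToDevice, stream);
    warp_affine_bilinear_and_normalize(d_src, src_line_size, src_width, src_height,
                                       d_dst, dst_width, dst_height, d_matrix, 0, norm, stream);
    cudaStreamSynchronize(stream);
    cudaFree(d_matrix);
}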
void norm_feature(
float* feature_array, int num_feature, int feature_length,
cudaStream_t stream
){
Assert(feature_length % 32 == 0);
int jobs = num_feature * feature_length;
auto grid = dim3(num_feature);
auto block = dim3(feature_length / 32, 32);
checkCudaKernel(normalize_feature_kernel << <grid, block, num_feature * sizeof(float), stream >> > (
feature_array, num_feature, feature_length, jobs
));
}
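// --- Illustrative usage sketch (added example, not part of the original file) ---
// Minimal host-side driver for norm_feature() above: it L2-normalizes each of the
// num_feature rows of a row-major [num_feature x feature_length] float tensor in place.
// feature_length must be a multiple of 32 (see the Assert above); names are made up.
static void norm_feature_example(cudaStream_t stream){
    const int num_feature = 8, feature_length = 512;   // 512 % 32 == 0
    float* d_features = nullptr;
    cudaMalloc(&d_features, num_feature * feature_length * sizeof(float));
    // ... fill d_features, e.g. cudaMemcpyAsync() of embeddings from the host ...
    norm_feature(d_features, num_feature, feature_length, stream);
    cudaStreamSynchronize(stream);
    cudaFree(d_features);
}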
// void resize_bilinear(
// uint8_t* src, int src_line_size, int src_width, int src_height,
// uint8_t* dst, int dst_line_size, int dst_width, int dst_height,
// cudaStream_t stream) {
// int jobs = dst_width * dst_height;
// auto grid = CUDATools::grid_dims(jobs);
// auto block = CUDATools::block_dims(jobs);
// checkCudaKernel(resize_bilinear_kernel << <grid, block, 0, stream >> > (
// src, src_line_size, src_width, src_height,
// dst, dst_line_size, dst_width, dst_height, src_width / (float)dst_width, src_height / (float)dst_height, jobs
// ));
// }
};
|
202e9b4c9e33c0ee363968a81b26e4dabd9af4f0.hip | // !!! This is a file automatically generated by hipify!!!
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Asher Elmquist
// =============================================================================
//
// =============================================================================
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace chrono {
namespace sensor {
__global__ void init_random_states(unsigned int seed, hiprandState_t* rng_states) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
// if (index < n_generators) {
hiprand_init(seed, index, 0, &rng_states[index]);
// }
}
void init_cuda_rng(unsigned int seed, hiprandState_t* rng_states, int n_generators) {
const int nThreads = 512;
int nBlocks = (n_generators + nThreads - 1) / nThreads;
hipLaunchKernelGGL(( init_random_states), dim3(nBlocks), dim3(nThreads), 0, 0, seed, rng_states);
}
} // namespace sensor
} // namespace chrono
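// --- Illustrative usage sketch (added example, not part of the original file) ---
// Possible host-side use of chrono::sensor::init_cuda_rng() above. Because the bounds
// check inside init_random_states() is commented out, every launched thread writes one
// state, so this sketch sizes the buffer for the rounded-up thread count rather than
// for n_generators alone. The function and variable names below are assumptions.
inline hiprandState_t* example_setup_rng(unsigned int seed, int n_generators) {
    const int nThreads = 512;
    const int nBlocks = (n_generators + nThreads - 1) / nThreads;
    hiprandState_t* states = nullptr;
    hipMalloc(&states, nBlocks * nThreads * sizeof(hiprandState_t));
    chrono::sensor::init_cuda_rng(seed, states, n_generators);
    hipDeviceSynchronize();
    return states;  // later passed to kernels that call hiprand_uniform(), hiprand_normal(), ...
}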
| 202e9b4c9e33c0ee363968a81b26e4dabd9af4f0.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2019 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Asher Elmquist
// =============================================================================
//
// =============================================================================
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
namespace chrono {
namespace sensor {
__global__ void init_random_states(unsigned int seed, curandState_t* rng_states) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
// if (index < n_generators) {
curand_init(seed, index, 0, &rng_states[index]);
// }
}
void init_cuda_rng(unsigned int seed, curandState_t* rng_states, int n_generators) {
const int nThreads = 512;
int nBlocks = (n_generators + nThreads - 1) / nThreads;
init_random_states<<<nBlocks, nThreads>>>(seed, rng_states);
}
} // namespace sensor
} // namespace chrono
|
613cc1a3fe45cc49f52c75e8966e2ccd6f6e297f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, SWCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > SWCADEM;
template hipError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, SWCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const SWCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
| 613cc1a3fe45cc49f52c75e8966e2ccd6f6e297f.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "../DEM2DForceGPU.cu"
typedef DEMEvaluator<Scalar, Scalar4, SWCAPotential<Scalar, Scalar4, NoFriction<Scalar> > > SWCADEM;
template cudaError_t gpu_compute_dem2d_forces<Scalar, Scalar2, Scalar4, SWCADEM>(
Scalar4* d_force, Scalar4* d_torque, Scalar* d_virial,
const unsigned int virial_pitch, const unsigned int N, const unsigned int n_ghosts,
const Scalar4 *d_pos,
const Scalar4 *d_quat, const Scalar2 *d_vertices,
const unsigned int *d_num_shape_verts, const Scalar* d_diam,
const Scalar4 *d_velocity,
const unsigned int vertexCount, const BoxDim& box,
const unsigned int *d_n_neigh, const unsigned int *d_nlist,
const unsigned int *d_head_list, const SWCADEM potential, const Scalar r_cutsq,
const unsigned int n_shapes,
const unsigned int particlesPerBlock, const unsigned int maxVerts);
|
5877d6d809f07410e344c0c42488cefff407a64d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cusp/io/matrix_market.h>
#include <cusp/coo_matrix.h>
#include <cusp/dia_matrix.h>
#include <cusp/verify.h>
#include <cusp/print.h>
#include <cusp/multiply.h>
#include <cusp/transpose.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <thrust/functional.h>
#include <thrust/system_error.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
using namespace thrust;
using namespace cusp;
using namespace std;
//#define CPU 0
#ifdef CPU
#define CAST(X) X ## _CPU
#define MEM cusp::host_memory
#else
#define CAST(X) X ## _GPU
#define MEM cusp::device_memory
#endif
typedef coo_matrix<int, double, host_memory> COO_CPU;
typedef array1d <double, host_memory> ARR1D_CPU;
typedef coo_matrix<int, double, device_memory> COO_GPU;
typedef array1d <double, device_memory> ARR1D_GPU;
void read_matrix(CAST(COO) &temp, const char *fname) {
// Reads web matrix from market file
io::read_matrix_market_file(temp, fname);
//print(temp);
fprintf(stderr, "Read the matrix\n");
// Link Matrix (Transpose of Web Matrix)
//COO_CPU temp1 = temp;
//transpose(temp, temp1);
//temp = temp1;
}
/*
Creating a struct to define the inversion operation.
*/
template <typename T>
struct inversion_op{
__host__ __device__ T operator()(const T& x)const{
return 1 / x;
}
};
void normalize(CAST(COO) &adj, CAST(ARR1D) &inv_sum) {
device_vector<double> in_keys(adj.values.size()); //cols
device_vector<double> in_values(adj.values.size()); //val
device_vector<double> out_keys(adj.num_rows); //cols
device_vector<double> out_values(adj.num_rows, 1); //sum
/*
in_keys are row indices corresponding to nnz entries in the matrix
in_values are the nnz values in the matrix
in_keys: 0 0 1 1 2
in_values: 1 1 1 1 1
*/
thrust::copy(adj.row_indices.begin(), adj.row_indices.end(), in_keys.begin());
thrust::copy(adj.values.begin(), adj.values.end(), in_values.begin());
thrust::equal_to<double> binary_pred;
thrust::plus<double> binary_op;
/*
reduces in_values using given operator (plus) compares like row numbers using (equal_to) and dumps result into out_values indexed by out_keys
From the above example:
out_keys: 0 1 2
out_values: 2 2 1
*/
reduce_by_key(in_keys.begin(), in_keys.end(), in_values.begin(), out_keys.begin(), out_values.begin(), binary_pred, binary_op);
fprintf(stderr, "Row sum calculated\n");
/*cout << "INKEY\tINVAL =====================\n";
for(int i = 0; i < in_keys.size(); i++)
cout << in_keys[i] << "\t" << in_values[i] << endl;
cout << "OUTKEY\tOUTVAL =====================\n";
for(int i = 0; i < out_keys.size(); i++)
cout << out_keys[i] << "\t" << out_values[i] << endl;
*/
/*
instantiated an inversion_op (invert) for use in transform
*/
inversion_op<double> invert = inversion_op<double>();
thrust::transform(out_values.begin(), out_values.end(), out_values.begin(), invert);
thrust::copy(out_values.begin(), out_values.end(), inv_sum.begin());
//cout << "INVERSE SUM" << endl;
//print(inv_sum);
CAST(COO) link_mat = adj;
transpose(adj, link_mat);
adj = link_mat;
fprintf(stderr, "Transpose calculated.\n");
/*
create diagonal matrix (num_rows x num_rows) and assign corresponding inverse sum values to its principal diagonal.
diagonal_offset => the offset from the principal diagonal (the one starting from element [0][0])
dia.values(x, y) => the xth element in the yth diagonal
For the rows in adj which has non_zero sum(ie at least one entry) set the corresponding row_num'th dia elmt to the inverse of its sum else set to 1
e.g.
index in inv_sum: 0 2
value in inv_sum: 1/2 1/3
set (0,0) ---> 1/2, (1,1) ----> 1, (2,2) ----> 1/3
*/
cusp::dia_matrix<int, double, MEM> dia(adj.num_rows, adj.num_rows, adj.num_rows, 1);
dia.diagonal_offsets[0] = 0;
for(int i = 0; i < out_keys.size(); i++)
dia.values(i, 0) = 1;
for(int i = 0; i < out_keys.size(); i++) {
//cout << i << "\t" << out_keys[i] << "\t" << inv_sum[i] << endl;
dia.values(out_keys[i], 0) = inv_sum[i];
}
cout << "DIA ==========\n";
print(dia);
/*
For some reason, the 0th entry in the diagonal is not being set in the above for loop. Therefore, this hack manually sets the first entry in the diagonal.
*/
//dia.values(0, 0) = inv_sum[0];
fprintf(stderr, "Formed dia_mat.\n");
if(is_valid_matrix(adj)) {
multiply(adj, dia, link_mat); // link_mat = adj * dia
adj = link_mat;
} else {
cout << "Invalid format!" << endl;
exit(1);
}
/*
CAST(ARR1D) sum(adj.num_cols, 0);
for(int i = 0; i < adj.values.size(); i++)
sum[adj.column_indices[i]] += adj.values[i];
print(sum);
*/
fprintf(stderr, "Normalized\n");
}
void pagerank(CAST(COO) &link, double beta, CAST(ARR1D) &rank) {
int V = link.num_rows;
double one_minus_beta = (1 - beta) / V;
CAST(ARR1D) teleport(V, one_minus_beta);
CAST(ARR1D) temp(V);
blas::fill(rank, 1 / (double) V);
if(!is_valid_matrix(link)) {
cout << "Link: Invalid format!" << endl;
return;
}
for(int i = 0; i < 30; i++) {
multiply(link, rank, temp); // temp = link * rank
blas::axpby(temp, teleport, rank, beta, 1); // rank = temp * beta + 1 * teleport
#ifndef CPU
hipDeviceSynchronize();
#endif
//cout << "==============" << i << "================" << endl;
//print(rank);
}
}
void print_array(CAST(ARR1D) rank) {
for (int i = 0; i < rank.size(); i++)
//printf ("%.10lf\n", rank[i]);
cout << setprecision(10) << rank[i] << endl;
}
void check_normalized(CAST(COO) adj) {
double sum[350045];
int nodes = 350045;
int i;
cout << "CHECK NORMALIZED" << endl;
for(i = 0; i < nodes; i++) sum[i] = 0.0;
for(i = 0; i < adj.num_entries; i++)
sum[adj.column_indices[i]] += adj.values[i];
for(i = 0; i < nodes; i++)
cout << sum[i] << endl;
/*
for(int i = 0; i < adj.num_rows; i++) {
vec.row_indices[i] = 0;
vec.column_indices[i] = i;
vec.values[i] = 1;
}
multiply(vec, adj, sum);
print(sum);*/
}
int main(int argc, char **argv) {
CAST(COO) adj;
read_matrix(adj, argv[1]);
CAST(ARR1D) rank(adj.num_rows);
CAST(ARR1D) inv_sum(adj.num_rows);
normalize(adj, inv_sum);
//cout << "NORMALIZED ADJ===============" << endl;
//print(adj);
//check_normalized(adj);
//print(adj);
//cout << "INVERSE ===============" << endl;
//print(inv_sum);
//pagerank(adj, atof(argv[2]), rank);
//print(rank);
//print_array(rank);
return 0;
}
| 5877d6d809f07410e344c0c42488cefff407a64d.cu | #include <cusp/io/matrix_market.h>
#include <cusp/coo_matrix.h>
#include <cusp/dia_matrix.h>
#include <cusp/verify.h>
#include <cusp/print.h>
#include <cusp/multiply.h>
#include <cusp/transpose.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <thrust/functional.h>
#include <thrust/system_error.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
using namespace thrust;
using namespace cusp;
using namespace std;
//#define CPU 0
#ifdef CPU
#define CAST(X) X ## _CPU
#define MEM cusp::host_memory
#else
#define CAST(X) X ## _GPU
#define MEM cusp::device_memory
#endif
typedef coo_matrix<int, double, host_memory> COO_CPU;
typedef array1d <double, host_memory> ARR1D_CPU;
typedef coo_matrix<int, double, device_memory> COO_GPU;
typedef array1d <double, device_memory> ARR1D_GPU;
void read_matrix(CAST(COO) &temp, const char *fname) {
// Reads web matrix from market file
io::read_matrix_market_file(temp, fname);
//print(temp);
fprintf(stderr, "Read the matrix\n");
// Link Matrix (Transpose of Web Matrix)
//COO_CPU temp1 = temp;
//transpose(temp, temp1);
//temp = temp1;
}
/*
Creating a struct to define the inversion operation.
*/
template <typename T>
struct inversion_op{
__host__ __device__ T operator()(const T& x)const{
return 1 / x;
}
};
void normalize(CAST(COO) &adj, CAST(ARR1D) &inv_sum) {
device_vector<double> in_keys(adj.values.size()); //cols
device_vector<double> in_values(adj.values.size()); //val
device_vector<double> out_keys(adj.num_rows); //cols
device_vector<double> out_values(adj.num_rows, 1); //sum
/*
in_keys are row indices corresponding to nnz entries in the matrix
in_values are the nnz values in the matrix
in_keys: 0 0 1 1 2
in_values: 1 1 1 1 1
*/
thrust::copy(adj.row_indices.begin(), adj.row_indices.end(), in_keys.begin());
thrust::copy(adj.values.begin(), adj.values.end(), in_values.begin());
thrust::equal_to<double> binary_pred;
thrust::plus<double> binary_op;
/*
reduces in_values using given operator (plus) compares like row numbers using (equal_to) and dumps result into out_values indexed by out_keys
From the above example:
out_keys: 0 1 2
out_values: 2 2 1
*/
reduce_by_key(in_keys.begin(), in_keys.end(), in_values.begin(), out_keys.begin(), out_values.begin(), binary_pred, binary_op);
fprintf(stderr, "Row sum calculated\n");
/*cout << "INKEY\tINVAL =====================\n";
for(int i = 0; i < in_keys.size(); i++)
cout << in_keys[i] << "\t" << in_values[i] << endl;
cout << "OUTKEY\tOUTVAL =====================\n";
for(int i = 0; i < out_keys.size(); i++)
cout << out_keys[i] << "\t" << out_values[i] << endl;
*/
/*
instantiated an inversion_op (invert) for use in transform
*/
inversion_op<double> invert = inversion_op<double>();
thrust::transform(out_values.begin(), out_values.end(), out_values.begin(), invert);
thrust::copy(out_values.begin(), out_values.end(), inv_sum.begin());
//cout << "INVERSE SUM" << endl;
//print(inv_sum);
CAST(COO) link_mat = adj;
transpose(adj, link_mat);
adj = link_mat;
fprintf(stderr, "Transpose calculated.\n");
/*
create diagonal matrix (num_rows x num_rows) and assign corresponding inverse sum values to its principal diagonal.
diagonal_offset => the offset from the principal diagonal (the one starting from element [0][0])
dia.values(x, y) => the xth element in the yth diagonal
For the rows in adj which has non_zero sum(ie at least one entry) set the corresponding row_num'th dia elmt to the inverse of its sum else set to 1
e.g.
index in inv_sum: 0 2
value in inv_sum: 1/2 1/3
set (0,0) ---> 1/2, (1,1) ----> 1, (2,2) ----> 1/3
*/
cusp::dia_matrix<int, double, MEM> dia(adj.num_rows, adj.num_rows, adj.num_rows, 1);
dia.diagonal_offsets[0] = 0;
for(int i = 0; i < out_keys.size(); i++)
dia.values(i, 0) = 1;
for(int i = 0; i < out_keys.size(); i++) {
//cout << i << "\t" << out_keys[i] << "\t" << inv_sum[i] << endl;
dia.values(out_keys[i], 0) = inv_sum[i];
}
cout << "DIA ==========\n";
print(dia);
/*
For some reason, the 0th entry in the diagonal is not being set in the above for loop. Therefore, this hack manually sets the first entry in the diagonal.
*/
//dia.values(0, 0) = inv_sum[0];
fprintf(stderr, "Formed dia_mat.\n");
if(is_valid_matrix(adj)) {
multiply(adj, dia, link_mat); // link_mat = adj * dia
adj = link_mat;
} else {
cout << "Invalid format!" << endl;
exit(1);
}
/*
CAST(ARR1D) sum(adj.num_cols, 0);
for(int i = 0; i < adj.values.size(); i++)
sum[adj.column_indices[i]] += adj.values[i];
print(sum);
*/
fprintf(stderr, "Normalized\n");
}
void pagerank(CAST(COO) &link, double beta, CAST(ARR1D) &rank) {
int V = link.num_rows;
double one_minus_beta = (1 - beta) / V;
CAST(ARR1D) teleport(V, one_minus_beta);
CAST(ARR1D) temp(V);
blas::fill(rank, 1 / (double) V);
if(!is_valid_matrix(link)) {
cout << "Link: Invalid format!" << endl;
return;
}
for(int i = 0; i < 30; i++) {
multiply(link, rank, temp); // temp = link * rank
blas::axpby(temp, teleport, rank, beta, 1); // rank = temp * beta + 1 * teleport
#ifndef CPU
cudaThreadSynchronize();
#endif
//cout << "==============" << i << "================" << endl;
//print(rank);
}
}
void print_array(CAST(ARR1D) rank) {
for (int i = 0; i < rank.size(); i++)
//printf ("%.10lf\n", rank[i]);
cout << setprecision(10) << rank[i] << endl;
}
void check_normalized(CAST(COO) adj) {
double sum[350045];
int nodes = 350045;
int i;
cout << "CHECK NORMALIZED" << endl;
for(i = 0; i < nodes; i++) sum[i] = 0.0;
for(i = 0; i < adj.num_entries; i++)
sum[adj.column_indices[i]] += adj.values[i];
for(i = 0; i < nodes; i++)
cout << sum[i] << endl;
/*
for(int i = 0; i < adj.num_rows; i++) {
vec.row_indices[i] = 0;
vec.column_indices[i] = i;
vec.values[i] = 1;
}
multiply(vec, adj, sum);
print(sum);*/
}
int main(int argc, char **argv) {
CAST(COO) adj;
read_matrix(adj, argv[1]);
CAST(ARR1D) rank(adj.num_rows);
CAST(ARR1D) inv_sum(adj.num_rows);
normalize(adj, inv_sum);
//cout << "NORMALIZED ADJ===============" << endl;
//print(adj);
//check_normalized(adj);
//print(adj);
//cout << "INVERSE ===============" << endl;
//print(inv_sum);
//pagerank(adj, atof(argv[2]), rank);
//print(rank);
//print_array(rank);
return 0;
}
|
940891d77fe04c0b345de2c6c3e15a5e44f4e975.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Author: Shih-Hao Tseng <[email protected]>
*/
#include "witsenhausen.cuh"
#include "helper_functions.cuh"
double
Witsenhausen::get_J_value (void) {
if (_output_coord == nullptr) return 0.0;
hipLaunchKernelGGL(( parallel_get_J_value), dim3((_total_samples+255)/256), dim3(256), 0, 0,
(Witsenhausen*)_prob_at_gpu
);
hipDeviceSynchronize ();
double cost = 0.0;
for (int sample_x = 0; sample_x < _total_samples; ++sample_x) {
cost += _comp_x_cost[sample_x];
}
return cost;
}
__global__
void
parallel_get_J_value (Witsenhausen* base) {
int sample_x = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_x >= base->_total_samples) return;
double x0, u0, x1, w, u1, x2;
base->_comp_x_cost[sample_x] = 0.0;
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x]; //get_u_value(this,0,x0);
base->_comp_x_cost[sample_x] += base->_k_2 * u0 * u0 * base->_x_prob_dx[sample_x];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
u1 = get_u_value(base, 1, x1 + w);
x2 = x1 - u1;
base->_comp_x_cost[sample_x] += x2 * x2 * base->_x_prob_dx[sample_x] * base->_w_prob_dw[sample_w];
}
}
void
Witsenhausen::set_k (double k) {
_k_2 = k*k;
}
void
Witsenhausen::set_sigma_x (double sigma_x) {
_sigma_x = sigma_x;
}
void
Witsenhausen::set_sigma_w (double sigma_w) {
_sigma_w = sigma_w;
}
double
Witsenhausen::test_normalization_x (void) {
if (_x_prob_dx == nullptr) return 0.0;
double* x_prob_dx = new double [_total_samples];
hipMemcpy(x_prob_dx,_x_prob_dx,_total_samples*sizeof(double),hipMemcpyDeviceToHost);
double sum = 0.0;
for (int sample = 0; sample < _total_samples; ++sample) {
sum += x_prob_dx[sample];
}
delete [] x_prob_dx;
return sum;
}
double
Witsenhausen::test_normalization_w (void) {
if (_w_prob_dw == nullptr) return 0.0;
double* w_prob_dw = new double [_total_samples];
hipMemcpy(w_prob_dw,_w_prob_dw,_total_samples*sizeof(double),hipMemcpyDeviceToHost);
double sum = 0.0;
for (int sample = 0; sample < _total_samples; ++sample) {
sum += w_prob_dw[sample];
}
delete [] w_prob_dw;
return sum;
}
void
Witsenhausen::initialize_variables (void) {
UCPSolver::initialize_variables ();
_total_stages = 2;
hipMalloc(&_x_prob , _total_samples*sizeof(double));
hipMalloc(&_x_prob_dx, _total_samples*sizeof(double));
hipMalloc(&_w_prob , _total_samples*sizeof(double));
hipMalloc(&_w_prob_dw, _total_samples*sizeof(double));
_sigma_w_to_x_ratio = _sigma_w/_sigma_x;
double w_step_size = _sample_coord_step_size * _sigma_w_to_x_ratio;
hipLaunchKernelGGL(( parallel_initialize_variables), dim3((_total_samples+255)/256), dim3(256), 0, 0,
this,
_total_samples,
_sample_coord_step_size,
_sigma_w_to_x_ratio,
w_step_size,
_sample_coord,
_sigma_x, _x_prob, _x_prob_dx,
_sigma_w, _w_prob, _w_prob_dw
);
hipDeviceSynchronize ();
hipMallocManaged(&_comp_x_cost, _total_samples*sizeof(double));
}
__global__
void
parallel_initialize_variables (
Witsenhausen* base,
const double total_samples,
const double sample_coord_step_size,
const double sigma_w_to_x_ratio,
const double w_step_size,
double* sample_coord,
const double sigma_x,
double* x_prob, double* x_prob_dx,
const double sigma_w,
double* w_prob, double* w_prob_dw
) {
int sample = blockIdx.x*blockDim.x + threadIdx.x;
if(sample >= total_samples) return;
x_prob[sample] =
(normpdf(sample_coord[sample]-sample_coord_step_size/2.0,sigma_x) +
normpdf(sample_coord[sample]+sample_coord_step_size/2.0,sigma_x)) / 2.0;
x_prob_dx[sample] = x_prob[sample] * sample_coord_step_size;
w_prob[sample] =
(normpdf(sample_coord[sample]*sigma_w_to_x_ratio-w_step_size/2.0,sigma_w) +
normpdf(sample_coord[sample]*sigma_w_to_x_ratio+w_step_size/2.0,sigma_w)) / 2.0;
w_prob_dw[sample] = w_prob[sample] * w_step_size;
}
void
Witsenhausen::destroy_variables (void) {
if ( _output_coord == nullptr ) {
return;
}
hipFree(_x_prob);
hipFree(_x_prob_dx);
hipFree(_w_prob);
hipFree(_w_prob_dw);
_x_prob = nullptr;
_x_prob_dx = nullptr;
_w_prob = nullptr;
_w_prob_dw = nullptr;
hipFree(_comp_x_cost);
_comp_x_cost = nullptr;
UCPSolver::destroy_variables ();
}
PROB_PARALLEL_SUITE(Witsenhausen);
__device__
double
compute_C_value (Witsenhausen* base, int stage, double u_m, double y_m) {
double C_value = 0.0;
double x0, u0, x1, w, u1, x2;
switch (stage) {
case 0:
{
// scaling gives the same result
//int sample_x = (y_m + _sample_range) / _sample_coord_step_size;
x0 = y_m;
u0 = u_m;
C_value += base->_k_2 * u0 * u0;// * _x_prob[sample_x];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
u1 = get_u_value(base, 1, x1 + w);
x2 = x1 - u1;
//C_value += x2 * x2 * _x_prob[sample_x] * _w_prob_dw[sample_w];
C_value += x2 * x2 * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
u1 = u_m;
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x]; //get_u_value(0,x0);
x1 = x0 + u0;
x2 = x1 - u1;
C_value += x2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, y_m - x1);
}
break;
}
default:
break;
}
return C_value;
}
__device__
double
get_w_prob_value (Witsenhausen* base, double coord) {
if ( base->_w_prob == nullptr ) return 0.0;
if(coord + base->_sample_range * base->_sigma_w_to_x_ratio < 0.0) {
return 0.0;
} else if (coord > base->_sample_range * base->_sigma_w_to_x_ratio) {
return 0.0;
} else {
double index = (coord/base->_sigma_w_to_x_ratio + base->_sample_range) / base->_sample_coord_step_size;
int id_max = ceil(index);
int id_min = floor(index);
if (id_max == id_min) {
return base->_w_prob[id_max];
} else {
double max = base->_w_prob[id_max];
double min = base->_w_prob[id_min];
double coord_max = base->_sample_coord[id_max] * base->_sigma_w_to_x_ratio;
double coord_min = base->_sample_coord[id_min] * base->_sigma_w_to_x_ratio;
return (max*(coord - coord_min) + min*(coord_max - coord))/(coord_max - coord_min);
}
}
}
void
Witsenhausen_GradientMomentum::compute_dC_du (int stage) {
// the precision is slightly worse because the GPU supports float only rather than double
hipLaunchKernelGGL(( parallel_compute_dC_du), dim3((_base->_total_samples+255)/256), dim3(256), 0, 0,
(Witsenhausen*)_base->_prob_at_gpu,
(Witsenhausen_GradientMomentum*) _algo_at_gpu,
stage
);
hipDeviceSynchronize ();
}
__global__
void
parallel_compute_dC_du (
Witsenhausen* base,
Witsenhausen_GradientMomentum* algo,
const int stage
) {
int sample_y = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_y >= base->_total_samples) return;
double x0, u0, x1, w, u1, x2, y1;
switch (stage) {
case 0:
{
double du1_du0;
x0 = base->_sample_coord[sample_y];
u0 = base->_u[sample_y];
algo->_dC_du [sample_y] = base->_k_2 * 2 * u0 * base->_x_prob[sample_y];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
y1 = x1 + w;
u1 = get_u_value(base, 1, y1);
du1_du0 = get_du_value(base, 1, y1);
x2 = x1 - u1;
algo->_dC_du [sample_y] += 2 * (1 - du1_du0) * x2 * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
y1 = base->_sample_coord[sample_y];
sample_y += base->_total_samples;
algo->_dC_du [sample_y] = 0.0;
u1 = base->_u[sample_y];
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x];
x1 = x0 + u0;
w = y1 - x1;
x2 = x1 - u1;
algo->_dC_du [sample_y] += - 2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
}
break;
}
default:
break;
}
}
ALGO_PARALLEL_SUITE(Witsenhausen_GradientMomentum)
void
Witsenhausen_ModifiedNewton::compute_derivatives (int stage) {
// the precision is slightly worse because the GPU supports float only rather than double
hipLaunchKernelGGL(( parallel_compute_derivatives), dim3((_base->_total_samples+255)/256), dim3(256), 0, 0,
(Witsenhausen*)_base->_prob_at_gpu,
(Witsenhausen_ModifiedNewton*) _algo_at_gpu,
stage
);
hipDeviceSynchronize ();
}
__global__
void
parallel_compute_derivatives (
Witsenhausen* base,
Witsenhausen_ModifiedNewton* algo,
const int stage
) {
int sample_y = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_y >= base->_total_samples) return;
// Also compute d2C_du2
double x0, u0, x1, w, u1, x2, y1;
switch (stage) {
case 0:
{
double du1_du0, d2u1_du02;
x0 = base->_sample_coord[sample_y];
u0 = base->_u[sample_y];
algo->_dC_du [sample_y] = base->_k_2 * 2 * u0 * base->_x_prob[sample_y];
algo->_d2C_du2[sample_y] = base->_k_2 * 2 * base->_x_prob[sample_y];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
y1 = x1 + w;
u1 = get_u_value(base, 1, y1);
du1_du0 = get_du_value(base, 1, y1);
d2u1_du02 = get_d2u_value(base, 1, y1);
x2 = x1 - u1;
algo->_dC_du [sample_y] += 2 * (1 - du1_du0) * x2 * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
algo->_d2C_du2[sample_y] += 2 * ( -d2u1_du02 * x2 + (1 - du1_du0) * (1 - du1_du0)) * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
y1 = base->_sample_coord[sample_y];
sample_y += base->_total_samples;
algo->_dC_du [sample_y] = 0.0;
algo->_d2C_du2[sample_y] = 0.0;
u1 = base->_u[sample_y];
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x];
x1 = x0 + u0;
w = y1 - x1;
x2 = x1 - u1;
algo->_dC_du [sample_y] += - 2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
algo->_d2C_du2[sample_y] += 2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
}
break;
}
default:
break;
}
}
ALGO_PARALLEL_SUITE(Witsenhausen_ModifiedNewton) | 940891d77fe04c0b345de2c6c3e15a5e44f4e975.cu | /*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
* Author: Shih-Hao Tseng <[email protected]>
*/
#include "witsenhausen.cuh"
#include "helper_functions.cuh"
double
Witsenhausen::get_J_value (void) {
if (_output_coord == nullptr) return 0.0;
parallel_get_J_value<<<(_total_samples+255)/256, 256>>> (
(Witsenhausen*)_prob_at_gpu
);
cudaDeviceSynchronize ();
double cost = 0.0;
for (int sample_x = 0; sample_x < _total_samples; ++sample_x) {
cost += _comp_x_cost[sample_x];
}
return cost;
}
__global__
void
parallel_get_J_value (Witsenhausen* base) {
int sample_x = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_x >= base->_total_samples) return;
double x0, u0, x1, w, u1, x2;
base->_comp_x_cost[sample_x] = 0.0;
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x]; //get_u_value(this,0,x0);
base->_comp_x_cost[sample_x] += base->_k_2 * u0 * u0 * base->_x_prob_dx[sample_x];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
u1 = get_u_value(base, 1, x1 + w);
x2 = x1 - u1;
base->_comp_x_cost[sample_x] += x2 * x2 * base->_x_prob_dx[sample_x] * base->_w_prob_dw[sample_w];
}
}
void
Witsenhausen::set_k (double k) {
_k_2 = k*k;
}
void
Witsenhausen::set_sigma_x (double sigma_x) {
_sigma_x = sigma_x;
}
void
Witsenhausen::set_sigma_w (double sigma_w) {
_sigma_w = sigma_w;
}
double
Witsenhausen::test_normalization_x (void) {
if (_x_prob_dx == nullptr) return 0.0;
double* x_prob_dx = new double [_total_samples];
cudaMemcpy(x_prob_dx,_x_prob_dx,_total_samples*sizeof(double),cudaMemcpyDeviceToHost);
double sum = 0.0;
for (int sample = 0; sample < _total_samples; ++sample) {
sum += x_prob_dx[sample];
}
delete [] x_prob_dx;
return sum;
}
double
Witsenhausen::test_normalization_w (void) {
if (_w_prob_dw == nullptr) return 0.0;
double* w_prob_dw = new double [_total_samples];
cudaMemcpy(w_prob_dw,_w_prob_dw,_total_samples*sizeof(double),cudaMemcpyDeviceToHost);
double sum = 0.0;
for (int sample = 0; sample < _total_samples; ++sample) {
sum += w_prob_dw[sample];
}
delete [] w_prob_dw;
return sum;
}
void
Witsenhausen::initialize_variables (void) {
UCPSolver::initialize_variables ();
_total_stages = 2;
cudaMalloc(&_x_prob , _total_samples*sizeof(double));
cudaMalloc(&_x_prob_dx, _total_samples*sizeof(double));
cudaMalloc(&_w_prob , _total_samples*sizeof(double));
cudaMalloc(&_w_prob_dw, _total_samples*sizeof(double));
_sigma_w_to_x_ratio = _sigma_w/_sigma_x;
double w_step_size = _sample_coord_step_size * _sigma_w_to_x_ratio;
parallel_initialize_variables<<<(_total_samples+255)/256, 256>>> (
this,
_total_samples,
_sample_coord_step_size,
_sigma_w_to_x_ratio,
w_step_size,
_sample_coord,
_sigma_x, _x_prob, _x_prob_dx,
_sigma_w, _w_prob, _w_prob_dw
);
cudaDeviceSynchronize ();
cudaMallocManaged(&_comp_x_cost, _total_samples*sizeof(double));
}
__global__
void
parallel_initialize_variables (
Witsenhausen* base,
const double total_samples,
const double sample_coord_step_size,
const double sigma_w_to_x_ratio,
const double w_step_size,
double* sample_coord,
const double sigma_x,
double* x_prob, double* x_prob_dx,
const double sigma_w,
double* w_prob, double* w_prob_dw
) {
int sample = blockIdx.x*blockDim.x + threadIdx.x;
if(sample >= total_samples) return;
x_prob[sample] =
(normpdf(sample_coord[sample]-sample_coord_step_size/2.0,sigma_x) +
normpdf(sample_coord[sample]+sample_coord_step_size/2.0,sigma_x)) / 2.0;
x_prob_dx[sample] = x_prob[sample] * sample_coord_step_size;
w_prob[sample] =
(normpdf(sample_coord[sample]*sigma_w_to_x_ratio-w_step_size/2.0,sigma_w) +
normpdf(sample_coord[sample]*sigma_w_to_x_ratio+w_step_size/2.0,sigma_w)) / 2.0;
w_prob_dw[sample] = w_prob[sample] * w_step_size;
}
void
Witsenhausen::destroy_variables (void) {
if ( _output_coord == nullptr ) {
return;
}
cudaFree(_x_prob);
cudaFree(_x_prob_dx);
cudaFree(_w_prob);
cudaFree(_w_prob_dw);
_x_prob = nullptr;
_x_prob_dx = nullptr;
_w_prob = nullptr;
_w_prob_dw = nullptr;
cudaFree(_comp_x_cost);
_comp_x_cost = nullptr;
UCPSolver::destroy_variables ();
}
PROB_PARALLEL_SUITE(Witsenhausen);
__device__
double
compute_C_value (Witsenhausen* base, int stage, double u_m, double y_m) {
double C_value = 0.0;
double x0, u0, x1, w, u1, x2;
switch (stage) {
case 0:
{
// scaling gives the same result
//int sample_x = (y_m + _sample_range) / _sample_coord_step_size;
x0 = y_m;
u0 = u_m;
C_value += base->_k_2 * u0 * u0;// * _x_prob[sample_x];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
u1 = get_u_value(base, 1, x1 + w);
x2 = x1 - u1;
//C_value += x2 * x2 * _x_prob[sample_x] * _w_prob_dw[sample_w];
C_value += x2 * x2 * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
u1 = u_m;
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x]; //get_u_value(0,x0);
x1 = x0 + u0;
x2 = x1 - u1;
C_value += x2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, y_m - x1);
}
break;
}
default:
break;
}
return C_value;
}
__device__
double
get_w_prob_value (Witsenhausen* base, double coord) {
if ( base->_w_prob == nullptr ) return 0.0;
if(coord + base->_sample_range * base->_sigma_w_to_x_ratio < 0.0) {
return 0.0;
} else if (coord > base->_sample_range * base->_sigma_w_to_x_ratio) {
return 0.0;
} else {
double index = (coord/base->_sigma_w_to_x_ratio + base->_sample_range) / base->_sample_coord_step_size;
int id_max = ceil(index);
int id_min = floor(index);
if (id_max == id_min) {
return base->_w_prob[id_max];
} else {
double max = base->_w_prob[id_max];
double min = base->_w_prob[id_min];
double coord_max = base->_sample_coord[id_max] * base->_sigma_w_to_x_ratio;
double coord_min = base->_sample_coord[id_min] * base->_sigma_w_to_x_ratio;
return (max*(coord - coord_min) + min*(coord_max - coord))/(coord_max - coord_min);
}
}
}
void
Witsenhausen_GradientMomentum::compute_dC_du (int stage) {
// the precision is slightly worse because the GPU supports float only rather than double
parallel_compute_dC_du<<<(_base->_total_samples+255)/256, 256>>> (
(Witsenhausen*)_base->_prob_at_gpu,
(Witsenhausen_GradientMomentum*) _algo_at_gpu,
stage
);
cudaDeviceSynchronize ();
}
__global__
void
parallel_compute_dC_du (
Witsenhausen* base,
Witsenhausen_GradientMomentum* algo,
const int stage
) {
int sample_y = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_y >= base->_total_samples) return;
double x0, u0, x1, w, u1, x2, y1;
switch (stage) {
case 0:
{
double du1_du0;
x0 = base->_sample_coord[sample_y];
u0 = base->_u[sample_y];
algo->_dC_du [sample_y] = base->_k_2 * 2 * u0 * base->_x_prob[sample_y];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
y1 = x1 + w;
u1 = get_u_value(base, 1, y1);
du1_du0 = get_du_value(base, 1, y1);
x2 = x1 - u1;
algo->_dC_du [sample_y] += 2 * (1 - du1_du0) * x2 * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
y1 = base->_sample_coord[sample_y];
sample_y += base->_total_samples;
algo->_dC_du [sample_y] = 0.0;
u1 = base->_u[sample_y];
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x];
x1 = x0 + u0;
w = y1 - x1;
x2 = x1 - u1;
algo->_dC_du [sample_y] += - 2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
}
break;
}
default:
break;
}
}
ALGO_PARALLEL_SUITE(Witsenhausen_GradientMomentum)
void
Witsenhausen_ModifiedNewton::compute_derivatives (int stage) {
// the precision is slightly worse because the GPU supports float only rather than double
parallel_compute_derivatives<<<(_base->_total_samples+255)/256, 256>>> (
(Witsenhausen*)_base->_prob_at_gpu,
(Witsenhausen_ModifiedNewton*) _algo_at_gpu,
stage
);
cudaDeviceSynchronize ();
}
__global__
void
parallel_compute_derivatives (
Witsenhausen* base,
Witsenhausen_ModifiedNewton* algo,
const int stage
) {
int sample_y = blockIdx.x*blockDim.x + threadIdx.x;
if(sample_y >= base->_total_samples) return;
// Also compute d2C_du2
double x0, u0, x1, w, u1, x2, y1;
switch (stage) {
case 0:
{
double du1_du0, d2u1_du02;
x0 = base->_sample_coord[sample_y];
u0 = base->_u[sample_y];
algo->_dC_du [sample_y] = base->_k_2 * 2 * u0 * base->_x_prob[sample_y];
algo->_d2C_du2[sample_y] = base->_k_2 * 2 * base->_x_prob[sample_y];
x1 = x0 + u0;
for (int sample_w = 0; sample_w < base->_total_samples; ++sample_w) {
w = base->_sample_coord[sample_w] * base->_sigma_w_to_x_ratio;
y1 = x1 + w;
u1 = get_u_value(base, 1, y1);
du1_du0 = get_du_value(base, 1, y1);
d2u1_du02 = get_d2u_value(base, 1, y1);
x2 = x1 - u1;
algo->_dC_du [sample_y] += 2 * (1 - du1_du0) * x2 * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
algo->_d2C_du2[sample_y] += 2 * ( -d2u1_du02 * x2 + (1 - du1_du0) * (1 - du1_du0)) * base->_x_prob[sample_y] * base->_w_prob_dw[sample_w];
}
break;
}
case 1:
{
y1 = base->_sample_coord[sample_y];
sample_y += base->_total_samples;
algo->_dC_du [sample_y] = 0.0;
algo->_d2C_du2[sample_y] = 0.0;
u1 = base->_u[sample_y];
for (int sample_x = 0; sample_x < base->_total_samples; ++sample_x) {
x0 = base->_sample_coord[sample_x];
u0 = base->_u[sample_x];
x1 = x0 + u0;
w = y1 - x1;
x2 = x1 - u1;
algo->_dC_du [sample_y] += - 2 * x2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
algo->_d2C_du2[sample_y] += 2 * base->_x_prob_dx[sample_x] * get_w_prob_value(base, w);
}
break;
}
default:
break;
}
}
ALGO_PARALLEL_SUITE(Witsenhausen_ModifiedNewton) |
9e90d2e1ca86a544bff274c635ae7af1512fea37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include "scan_common.h"
#include <stdio.h>
//All three kernels run THREADBLOCK_SIZE (currently 256) threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 256
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(0)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1){
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE
inline __device__ uint warpScanInclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1)
s_Data[pos] += s_Data[pos - offset];
return s_Data[pos];
}
inline __device__ uint warpScanExclusive(uint idata, volatile uint *s_Data, uint size){
return warpScanInclusive(idata, s_Data, size) - idata;
}
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
printf("size: %d, warp: %d\n", size, WARP_SIZE);
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
uint warpResult = warpScanInclusive(idata, s_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpResult;
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (THREADBLOCK_SIZE / WARP_SIZE) ){
//grab top warp elements
uint val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> LOG2_WARP_SIZE];
}else{
return warpScanInclusive(idata, s_Data, size);
}
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#endif
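//Worked example (added note, not from the original sample): for per-thread inputs
//{3, 1, 7, 0, 4, 1, 6, 3}, scan1Inclusive() produces the running sums
//{3, 4, 11, 11, 15, 16, 22, 25}, and scan1Exclusive() shifts them to
//{0, 3, 4, 11, 11, 15, 16, 22} by subtracting each thread's own input.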
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size){
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size){
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if(pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] +
d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if(pos < N)
d_Buf[pos] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
){
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 0)
buf = d_Buffer[blockIdx.x];
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 64 * 1048576;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
extern "C" void initScan(void){
hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint));
}
extern "C" void closeScan(void){
hipFree(d_Buf);
}
static uint factorRadix2(uint& log2L, uint L){
if(!L){
log2L = 0;
return 0;
}else{
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor){
return ( (dividend % divisor) == 0 ) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
//Check all threadblocks to be fully packed with data
printf("batchSize: %d, arrayLengh: %d, threadblock_size: %d\n", batchSize, arrayLength, THREADBLOCK_SIZE);
assert( (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE) == 0 );
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
//cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//printf("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
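//--- Illustrative usage sketch (added example, not part of the original sample) ---
//Minimal host-side driver for scanExclusiveShort() above, respecting its asserts:
//arrayLength is a power of two in [MIN_SHORT_ARRAY_SIZE, MAX_SHORT_ARRAY_SIZE] and
//batchSize * arrayLength fills whole threadblocks. Buffer names are assumptions.
extern "C" void scanExclusiveShortExample(void){
    const uint batchSize = 1024, arrayLength = 4 * THREADBLOCK_SIZE;
    const uint numElements = batchSize * arrayLength;
    uint *d_Src = NULL, *d_Dst = NULL;
    hipMalloc((void **)&d_Src, numElements * sizeof(uint));
    hipMalloc((void **)&d_Dst, numElements * sizeof(uint));
    hipMemset(d_Src, 0, numElements * sizeof(uint));
    initScan();
    scanExclusiveShort(d_Dst, d_Src, batchSize, arrayLength);
    hipDeviceSynchronize();
    closeScan();
    hipFree(d_Src);
    hipFree(d_Dst);
}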
//extern "C" size_t scanExclusiveLarge(
size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
hipLaunchKernelGGL(( scanExclusiveShared), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
//cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//printf("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp( (batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE );
printf("blockCount2: %d\n", blockCount2);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE),
arrayLength / (4 * THREADBLOCK_SIZE)
);
//cutilCheckMsg("scanExclusiveShared2() execution FAILED\n");
printf("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
//cutilCheckMsg("uniformUpdate() execution FAILED\n");
printf("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
| 9e90d2e1ca86a544bff274c635ae7af1512fea37.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include "scan_common.h"
#include <stdio.h>
//All three kernels run THREADBLOCK_SIZE (currently 256) threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 256
////////////////////////////////////////////////////////////////////////////////
// Basic scan codelets
////////////////////////////////////////////////////////////////////////////////
#if(0)
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1){
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#else
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
//Almost the same as naive scan1Inclusive, but doesn't need __syncthreads()
//assuming size <= WARP_SIZE
inline __device__ uint warpScanInclusive(uint idata, volatile uint *s_Data, uint size){
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(uint offset = 1; offset < size; offset <<= 1)
s_Data[pos] += s_Data[pos - offset];
return s_Data[pos];
}
inline __device__ uint warpScanExclusive(uint idata, volatile uint *s_Data, uint size){
return warpScanInclusive(idata, s_Data, size) - idata;
}
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size){
printf("size: %d, warp: %d\n", size, WARP_SIZE);
if(size > WARP_SIZE){
//Bottom-level inclusive warp scan
uint warpResult = warpScanInclusive(idata, s_Data, WARP_SIZE);
//Save top elements of each warp for exclusive warp scan
//sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpResult;
//wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (THREADBLOCK_SIZE / WARP_SIZE) ){
//grab top warp elements
uint val = s_Data[threadIdx.x];
//calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data, size >> LOG2_WARP_SIZE);
}
//return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> LOG2_WARP_SIZE];
}else{
return warpScanInclusive(idata, s_Data, size);
}
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size){
return scan1Inclusive(idata, s_Data, size) - idata;
}
#endif
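//Worked example (added note, not from the original sample): for per-thread inputs
//{3, 1, 7, 0, 4, 1, 6, 3}, scan1Inclusive() produces the running sums
//{3, 4, 11, 11, 15, 16, 22, 25}, and scan1Exclusive() shifts them to
//{0, 3, 4, 11, 11, 15, 16, 22} by subtracting each thread's own input.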
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size){
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size){
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
){
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if(pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] +
d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if(pos < N)
d_Buf[pos] = odata;
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
){
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if(threadIdx.x == 0)
buf = d_Buffer[blockIdx.x];
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 64 * 1048576;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
extern "C" void initScan(void){
cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint));
}
extern "C" void closeScan(void){
cudaFree(d_Buf);
}
static uint factorRadix2(uint& log2L, uint L){
if(!L){
log2L = 0;
return 0;
}else{
for(log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor){
return ( (dividend % divisor) == 0 ) ? (dividend / divisor) : (dividend / divisor + 1);
}
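//Batched exclusive scan of short arrays (up to 4 * THREADBLOCK_SIZE elements each):
//a single scanExclusiveShared() launch covers the whole batch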
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
//Check all threadblocks to be fully packed with data
printf("batchSize: %d, arrayLengh: %d, threadblock_size: %d\n", batchSize, arrayLength, THREADBLOCK_SIZE);
assert( (batchSize * arrayLength) % (4 * THREADBLOCK_SIZE) == 0 );
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
//cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//printf("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
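//Batched exclusive scan of large arrays: bottom-level per-block scans,
//an exclusive scan of the per-block totals, then a uniform update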
//extern "C" size_t scanExclusiveLarge(
size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint batchSize,
uint arrayLength
){
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert( factorizationRemainder == 1 );
//Check supported size range
assert( (arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE) );
//Check total batch size limit
assert( (batchSize * arrayLength) <= MAX_BATCH_ELEMENTS );
scanExclusiveShared<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
//cutilCheckMsg("scanExclusiveShared() execution FAILED\n");
//printf("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp( (batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE );
printf("blockCount2: %d\n", blockCount2);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE),
arrayLength / (4 * THREADBLOCK_SIZE)
);
//cutilCheckMsg("scanExclusiveShared2() execution FAILED\n");
printf("scanExclusiveShared2() execution FAILED\n");
uniformUpdate<<<(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
//cutilCheckMsg("uniformUpdate() execution FAILED\n");
printf("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
|
cf30acabcd7476f7723531d2b17d60b4160efc57.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "NCV.hpp"
#include "NCVAlg.hpp"
#include "NCVPyramid.hpp"
#include "NCVPixelOperations.hpp"
//#include "opencv2/gpu/device/common.hpp"
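// 2x2 box average, specialized on the channel count CN;
// the integer paths add 2 before dividing by 4 so the result rounds to nearest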
template<typename T, Ncv32u CN> struct __average4_CN {static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11);};
template<typename T> struct __average4_CN<T, 1> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
return out;
}};
template<> struct __average4_CN<float1, 1> {
static __host__ __device__ float1 _average4_CN(const float1 &p00, const float1 &p01, const float1 &p10, const float1 &p11)
{
float1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<> struct __average4_CN<double1, 1> {
static __host__ __device__ double1 _average4_CN(const double1 &p00, const double1 &p01, const double1 &p10, const double1 &p11)
{
double1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 3> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
return out;
}};
template<> struct __average4_CN<float3, 3> {
static __host__ __device__ float3 _average4_CN(const float3 &p00, const float3 &p01, const float3 &p10, const float3 &p11)
{
float3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<> struct __average4_CN<double3, 3> {
static __host__ __device__ double3 _average4_CN(const double3 &p00, const double3 &p01, const double3 &p10, const double3 &p11)
{
double3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 4> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
out.w = ((Ncv32s)p00.w + p01.w + p10.w + p11.w + 2) / 4;
return out;
}};
template<> struct __average4_CN<float4, 4> {
static __host__ __device__ float4 _average4_CN(const float4 &p00, const float4 &p01, const float4 &p10, const float4 &p11)
{
float4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<> struct __average4_CN<double4, 4> {
static __host__ __device__ double4 _average4_CN(const double4 &p00, const double4 &p01, const double4 &p10, const double4 &p11)
{
double4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<typename T> static __host__ __device__ T _average4(const T &p00, const T &p01, const T &p10, const T &p11)
{
return __average4_CN<T, NC(T)>::_average4_CN(p00, p01, p10, p11);
}
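// Per-channel linear interpolation between pixels a and b with weight d (d = 0 gives a, d = 1 gives b)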
template<typename Tin, typename Tout, Ncv32u CN> struct __lerp_CN {static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d);};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 1> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 3> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 4> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)),
TB(b.w * d + a.w * (1 - d)));
}};
template<typename Tin, typename Tout> static __host__ __device__ Tout _lerp(const Tin &a, const Tin &b, Ncv32f d)
{
return __lerp_CN<Tin, Tout, NC(Tin)>::_lerp_CN(a, b, d);
}
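// Each destination pixel (next, coarser pyramid level) is the average of the corresponding 2x2 source block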
template<typename T>
__global__ void kernelDownsampleX2(T *d_src,
Ncv32u srcPitch,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
T *d_src_line1 = (T *)((Ncv8u *)d_src + (2 * i + 0) * srcPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_src + (2 * i + 1) * srcPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00 = d_src_line1[2*j+0];
T p01 = d_src_line1[2*j+1];
T p10 = d_src_line2[2*j+0];
T p11 = d_src_line2[2*j+1];
d_dst_line[j] = _average4(p00, p01, p10, p11);
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, hipStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(src.cols, bDim.x), divUp(src.rows, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, stream>>>((T*)src.data, src.step, (T*)dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void kernelDownsampleX2_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelDownsampleX2_gpu<float4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
}
}}} */
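// Bilinear resampling of the source (top) layer into the destination ROI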
template<typename T>
__global__ void kernelInterpolateFrom1(T *d_srcTop,
Ncv32u srcTopPitch,
NcvSize32u szTopRoi,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
Ncv32f ptTopX = 1.0f * (szTopRoi.width - 1) * j / (dstRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopRoi.height - 1) * i / (dstRoi.height - 1);
Ncv32u xl = (Ncv32u)ptTopX;
Ncv32u xh = xl+1;
Ncv32f dx = ptTopX - xl;
Ncv32u yl = (Ncv32u)ptTopY;
Ncv32u yh = yl+1;
Ncv32f dy = ptTopY - yl;
T *d_src_line1 = (T *)((Ncv8u *)d_srcTop + yl * srcTopPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_srcTop + yh * srcTopPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00, p01, p10, p11;
p00 = d_src_line1[xl];
p01 = xh < szTopRoi.width ? d_src_line1[xh] : p00;
p10 = yh < szTopRoi.height ? d_src_line2[xl] : p00;
p11 = (xh < szTopRoi.width && yh < szTopRoi.height) ? d_src_line2[xh] : p00;
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
T outPix = _pixDemoteClampZ<TVFlt, T>(mixture);
d_dst_line[j] = outPix;
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, hipStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(dst.cols, bDim.x), divUp(dst.rows, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, stream>>>((T*) src.data, src.step, NcvSize32u(src.cols, src.rows),
(T*) dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void kernelInterpolateFrom1_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float1>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float3>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
template void kernelInterpolateFrom1_gpu<float4>(DevMem2Db src, DevMem2Db dst, hipStream_t stream);
}
}}} */
#if 0 //def _WIN32
template<typename T>
static T _interpLinear(const T &a, const T &b, Ncv32f d)
{
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt tmp = _lerp<T, TVFlt>(a, b, d);
return _pixDemoteClampZ<TVFlt, T>(tmp);
}
template<typename T>
static T _interpBilinear(const NCVMatrix<T> &refLayer, Ncv32f x, Ncv32f y)
{
Ncv32u xl = (Ncv32u)x;
Ncv32u xh = xl+1;
Ncv32f dx = x - xl;
Ncv32u yl = (Ncv32u)y;
Ncv32u yh = yl+1;
Ncv32f dy = y - yl;
T p00, p01, p10, p11;
p00 = refLayer.at(xl, yl);
p01 = xh < refLayer.width() ? refLayer.at(xh, yl) : p00;
p10 = yh < refLayer.height() ? refLayer.at(xl, yh) : p00;
p11 = (xh < refLayer.width() && yh < refLayer.height()) ? refLayer.at(xh, yh) : p00;
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
return _pixDemoteClampZ<TVFlt, T>(mixture);
}
template <class T>
NCVImagePyramid<T>::NCVImagePyramid(const NCVMatrix<T> &img,
Ncv8u numLayers,
INCVMemAllocator &alloc,
hipStream_t cuStream)
{
this->_isInitialized = false;
ncvAssertPrintReturn(img.memType() == alloc.memType(), "NCVImagePyramid::ctor error", );
this->layer0 = &img;
NcvSize32u szLastLayer(img.width(), img.height());
this->nLayers = 1;
NCV_SET_SKIP_COND(alloc.isCounting());
NcvBool bDeviceCode = alloc.memType() == NCVMemoryTypeDevice;
if (numLayers == 0)
{
numLayers = 255; //the loop below cuts off once either dimension reaches 1
}
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
for (Ncv32u i=0; i<(Ncv32u)numLayers-1; i++)
{
NcvSize32u szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);
if (szCurLayer.width == 0 || szCurLayer.height == 0)
{
break;
}
this->pyramid.push_back(new NCVMatrixAlloc<T>(alloc, szCurLayer.width, szCurLayer.height));
ncvAssertPrintReturn(((NCVMatrixAlloc<T> *)(this->pyramid[i]))->isMemAllocated(), "NCVImagePyramid::ctor error", );
this->nLayers++;
//fill in the layer
NCV_SKIP_COND_BEGIN
const NCVMatrix<T> *prevLayer = i == 0 ? this->layer0 : this->pyramid[i-1];
NCVMatrix<T> *curLayer = this->pyramid[i];
if (bDeviceCode)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(szCurLayer.width, bDim.x), divUp(szCurLayer.height, bDim.y));
hipLaunchKernelGGL(( kernelDownsampleX2), dim3(gDim), dim3(bDim), 0, cuStream, prevLayer->ptr(),
prevLayer->pitch(),
curLayer->ptr(),
curLayer->pitch(),
szCurLayer);
ncvAssertPrintReturn(hipSuccess == hipGetLastError(), "NCVImagePyramid::ctor error", );
#ifdef SELF_CHECK_GPU
NCVMatrixAlloc<T> h_prevLayer(allocCPU, prevLayer->width(), prevLayer->height());
ncvAssertPrintReturn(h_prevLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
NCVMatrixAlloc<T> h_curLayer(allocCPU, curLayer->width(), curLayer->height());
ncvAssertPrintReturn(h_curLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == prevLayer->copy2D(h_prevLayer, prevLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == curLayer->copy2D(h_curLayer, curLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(hipSuccess == hipStreamSynchronize(cuStream), "Validation failure in NCVImagePyramid::ctor", );
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = h_prevLayer.at(2*j+0, 2*i+0);
T p01 = h_prevLayer.at(2*j+1, 2*i+0);
T p10 = h_prevLayer.at(2*j+0, 2*i+1);
T p11 = h_prevLayer.at(2*j+1, 2*i+1);
T outGold = _average4(p00, p01, p10, p11);
T outGPU = h_curLayer.at(j, i);
ncvAssertPrintReturn(0 == memcmp(&outGold, &outGPU, sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelDownsampleX2", );
}
}
#endif
}
else
{
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = prevLayer->at(2*j+0, 2*i+0);
T p01 = prevLayer->at(2*j+1, 2*i+0);
T p10 = prevLayer->at(2*j+0, 2*i+1);
T p11 = prevLayer->at(2*j+1, 2*i+1);
curLayer->at(j, i) = _average4(p00, p01, p10, p11);
}
}
}
NCV_SKIP_COND_END
szLastLayer = szCurLayer;
}
this->_isInitialized = true;
}
template <class T>
NCVImagePyramid<T>::~NCVImagePyramid()
{
}
template <class T>
NcvBool NCVImagePyramid<T>::isInitialized() const
{
return this->_isInitialized;
}
template <class T>
NCVStatus NCVImagePyramid<T>::getLayer(NCVMatrix<T> &outImg,
NcvSize32u outRoi,
NcvBool bTrilinear,
hipStream_t cuStream) const
{
ncvAssertReturn(this->isInitialized(), NCV_UNKNOWN_ERROR);
ncvAssertReturn(outImg.memType() == this->layer0->memType(), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(outRoi.width <= this->layer0->width() && outRoi.height <= this->layer0->height() &&
outRoi.width > 0 && outRoi.height > 0, NCV_DIMENSIONS_INVALID);
if (outRoi.width == this->layer0->width() && outRoi.height == this->layer0->height())
{
ncvAssertReturnNcvStat(this->layer0->copy2D(outImg, NcvSize32u(this->layer0->width(), this->layer0->height()), cuStream));
return NCV_SUCCESS;
}
Ncv32f lastScale = 1.0f;
Ncv32f curScale;
const NCVMatrix<T> *lastLayer = this->layer0;
const NCVMatrix<T> *curLayer = NULL;
NcvBool bUse2Refs = false;
for (Ncv32u i=0; i<this->nLayers-1; i++)
{
curScale = lastScale * 0.5f;
curLayer = this->pyramid[i];
if (outRoi.width == curLayer->width() && outRoi.height == curLayer->height())
{
ncvAssertReturnNcvStat(this->pyramid[i]->copy2D(outImg, NcvSize32u(this->pyramid[i]->width(), this->pyramid[i]->height()), cuStream));
return NCV_SUCCESS;
}
if (outRoi.width >= curLayer->width() && outRoi.height >= curLayer->height())
{
if (outRoi.width < lastLayer->width() && outRoi.height < lastLayer->height())
{
bUse2Refs = true;
}
break;
}
lastScale = curScale;
lastLayer = curLayer;
}
bUse2Refs = bUse2Refs && bTrilinear;
NCV_SET_SKIP_COND(outImg.memType() == NCVMemoryTypeNone);
NcvBool bDeviceCode = this->layer0->memType() == NCVMemoryTypeDevice;
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
NCV_SKIP_COND_BEGIN
if (bDeviceCode)
{
ncvAssertReturn(bUse2Refs == false, NCV_NOT_IMPLEMENTED);
dim3 bDim(16, 8);
dim3 gDim(divUp(outRoi.width, bDim.x), divUp(outRoi.height, bDim.y));
hipLaunchKernelGGL(( kernelInterpolateFrom1), dim3(gDim), dim3(bDim), 0, cuStream, lastLayer->ptr(),
lastLayer->pitch(),
lastLayer->size(),
outImg.ptr(),
outImg.pitch(),
outRoi);
ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR);
#ifdef SELF_CHECK_GPU
ncvSafeMatAlloc(h_lastLayer, T, allocCPU, lastLayer->width(), lastLayer->height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvSafeMatAlloc(h_outImg, T, allocCPU, outImg.width(), outImg.height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturnNcvStat(lastLayer->copy2D(h_lastLayer, lastLayer->size(), cuStream));
ncvAssertReturnNcvStat(outImg.copy2D(h_outImg, outRoi, cuStream));
ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T outGold = _interpBilinear(h_lastLayer, ptTopX, ptTopY);
ncvAssertPrintReturn(0 == memcmp(&outGold, &h_outImg.at(j,i), sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelInterpolateFrom1", NCV_UNKNOWN_ERROR);
}
}
#endif
}
else
{
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
//top layer pixel (always exists)
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T topPix = _interpBilinear(*lastLayer, ptTopX, ptTopY);
T trilinearPix = topPix;
if (bUse2Refs)
{
//bottom layer pixel (exists only if the requested scale is greater than the smallest layer scale)
NcvSize32u szBottomLayer(curLayer->width(), curLayer->height());
Ncv32f ptBottomX = 1.0f * (szBottomLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptBottomY = 1.0f * (szBottomLayer.height - 1) * i / (outRoi.height - 1);
T bottomPix = _interpBilinear(*curLayer, ptBottomX, ptBottomY);
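//Blend the two neighbouring pyramid levels according to where the
//requested scale falls between curScale and lastScale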
Ncv32f scale = (1.0f * outRoi.width / layer0->width() + 1.0f * outRoi.height / layer0->height()) / 2;
Ncv32f dl = (scale - curScale) / (lastScale - curScale);
dl = CLAMP(dl, 0.0f, 1.0f);
trilinearPix = _interpLinear(bottomPix, topPix, dl);
}
outImg.at(j, i) = trilinearPix;
}
}
}
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
template class NCVImagePyramid<uchar1>;
template class NCVImagePyramid<uchar3>;
template class NCVImagePyramid<uchar4>;
template class NCVImagePyramid<ushort1>;
template class NCVImagePyramid<ushort3>;
template class NCVImagePyramid<ushort4>;
template class NCVImagePyramid<uint1>;
template class NCVImagePyramid<uint3>;
template class NCVImagePyramid<uint4>;
template class NCVImagePyramid<float1>;
template class NCVImagePyramid<float3>;
template class NCVImagePyramid<float4>;
#endif //_WIN32
| cf30acabcd7476f7723531d2b17d60b4160efc57.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $Id: $
* Ported to PCL by Koen Buys : Attention Work in progress!
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include "NCV.hpp"
#include "NCVAlg.hpp"
#include "NCVPyramid.hpp"
#include "NCVPixelOperations.hpp"
//#include "opencv2/gpu/device/common.hpp"
template<typename T, Ncv32u CN> struct __average4_CN {static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11);};
template<typename T> struct __average4_CN<T, 1> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
return out;
}};
template<> struct __average4_CN<float1, 1> {
static __host__ __device__ float1 _average4_CN(const float1 &p00, const float1 &p01, const float1 &p10, const float1 &p11)
{
float1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<> struct __average4_CN<double1, 1> {
static __host__ __device__ double1 _average4_CN(const double1 &p00, const double1 &p01, const double1 &p10, const double1 &p11)
{
double1 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 3> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
return out;
}};
template<> struct __average4_CN<float3, 3> {
static __host__ __device__ float3 _average4_CN(const float3 &p00, const float3 &p01, const float3 &p10, const float3 &p11)
{
float3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<> struct __average4_CN<double3, 3> {
static __host__ __device__ double3 _average4_CN(const double3 &p00, const double3 &p01, const double3 &p10, const double3 &p11)
{
double3 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
return out;
}};
template<typename T> struct __average4_CN<T, 4> {
static __host__ __device__ T _average4_CN(const T &p00, const T &p01, const T &p10, const T &p11)
{
T out;
out.x = ((Ncv32s)p00.x + p01.x + p10.x + p11.x + 2) / 4;
out.y = ((Ncv32s)p00.y + p01.y + p10.y + p11.y + 2) / 4;
out.z = ((Ncv32s)p00.z + p01.z + p10.z + p11.z + 2) / 4;
out.w = ((Ncv32s)p00.w + p01.w + p10.w + p11.w + 2) / 4;
return out;
}};
template<> struct __average4_CN<float4, 4> {
static __host__ __device__ float4 _average4_CN(const float4 &p00, const float4 &p01, const float4 &p10, const float4 &p11)
{
float4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<> struct __average4_CN<double4, 4> {
static __host__ __device__ double4 _average4_CN(const double4 &p00, const double4 &p01, const double4 &p10, const double4 &p11)
{
double4 out;
out.x = (p00.x + p01.x + p10.x + p11.x) / 4;
out.y = (p00.y + p01.y + p10.y + p11.y) / 4;
out.z = (p00.z + p01.z + p10.z + p11.z) / 4;
out.w = (p00.w + p01.w + p10.w + p11.w) / 4;
return out;
}};
template<typename T> static __host__ __device__ T _average4(const T &p00, const T &p01, const T &p10, const T &p11)
{
return __average4_CN<T, NC(T)>::_average4_CN(p00, p01, p10, p11);
}
template<typename Tin, typename Tout, Ncv32u CN> struct __lerp_CN {static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d);};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 1> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 3> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)));
}};
template<typename Tin, typename Tout> struct __lerp_CN<Tin, Tout, 4> {
static __host__ __device__ Tout _lerp_CN(const Tin &a, const Tin &b, Ncv32f d)
{
typedef typename TConvVec2Base<Tout>::TBase TB;
return _pixMake(TB(b.x * d + a.x * (1 - d)),
TB(b.y * d + a.y * (1 - d)),
TB(b.z * d + a.z * (1 - d)),
TB(b.w * d + a.w * (1 - d)));
}};
template<typename Tin, typename Tout> static __host__ __device__ Tout _lerp(const Tin &a, const Tin &b, Ncv32f d)
{
return __lerp_CN<Tin, Tout, NC(Tin)>::_lerp_CN(a, b, d);
}
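// Each destination pixel (next, coarser pyramid level) is the average of the corresponding 2x2 source block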
template<typename T>
__global__ void kernelDownsampleX2(T *d_src,
Ncv32u srcPitch,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
T *d_src_line1 = (T *)((Ncv8u *)d_src + (2 * i + 0) * srcPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_src + (2 * i + 1) * srcPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00 = d_src_line1[2*j+0];
T p01 = d_src_line1[2*j+1];
T p10 = d_src_line2[2*j+0];
T p11 = d_src_line2[2*j+1];
d_dst_line[j] = _average4(p00, p01, p10, p11);
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelDownsampleX2_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(src.cols, bDim.x), divUp(src.rows, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, stream>>>((T*)src.data, src.step, (T*)dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void kernelDownsampleX2_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelDownsampleX2_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
}
}}} */
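// Bilinear resampling of the source (top) layer into the destination ROI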
template<typename T>
__global__ void kernelInterpolateFrom1(T *d_srcTop,
Ncv32u srcTopPitch,
NcvSize32u szTopRoi,
T *d_dst,
Ncv32u dstPitch,
NcvSize32u dstRoi)
{
Ncv32u i = blockIdx.y * blockDim.y + threadIdx.y;
Ncv32u j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dstRoi.height && j < dstRoi.width)
{
Ncv32f ptTopX = 1.0f * (szTopRoi.width - 1) * j / (dstRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopRoi.height - 1) * i / (dstRoi.height - 1);
Ncv32u xl = (Ncv32u)ptTopX;
Ncv32u xh = xl+1;
Ncv32f dx = ptTopX - xl;
Ncv32u yl = (Ncv32u)ptTopY;
Ncv32u yh = yl+1;
Ncv32f dy = ptTopY - yl;
T *d_src_line1 = (T *)((Ncv8u *)d_srcTop + yl * srcTopPitch);
T *d_src_line2 = (T *)((Ncv8u *)d_srcTop + yh * srcTopPitch);
T *d_dst_line = (T *)((Ncv8u *)d_dst + i * dstPitch);
T p00, p01, p10, p11;
p00 = d_src_line1[xl];
p01 = xh < szTopRoi.width ? d_src_line1[xh] : p00;
p10 = yh < szTopRoi.height ? d_src_line2[xl] : p00;
p11 = (xh < szTopRoi.width && yh < szTopRoi.height) ? d_src_line2[xh] : p00;
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
T outPix = _pixDemoteClampZ<TVFlt, T>(mixture);
d_dst_line[j] = outPix;
}
}
/*
namespace cv { namespace gpu { namespace device
{
namespace pyramid
{
template <typename T> void kernelInterpolateFrom1_gpu(DevMem2Db src, DevMem2Db dst, cudaStream_t stream)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(dst.cols, bDim.x), divUp(dst.rows, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, stream>>>((T*) src.data, src.step, NcvSize32u(src.cols, src.rows),
(T*) dst.data, dst.step, NcvSize32u(dst.cols, dst.rows));
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void kernelInterpolateFrom1_gpu<uchar1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<uchar4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<ushort4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float1>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float3>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
template void kernelInterpolateFrom1_gpu<float4>(DevMem2Db src, DevMem2Db dst, cudaStream_t stream);
}
}}} */
#if 0 //def _WIN32
template<typename T>
static T _interpLinear(const T &a, const T &b, Ncv32f d)
{
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt tmp = _lerp<T, TVFlt>(a, b, d);
return _pixDemoteClampZ<TVFlt, T>(tmp);
}
template<typename T>
static T _interpBilinear(const NCVMatrix<T> &refLayer, Ncv32f x, Ncv32f y)
{
Ncv32u xl = (Ncv32u)x;
Ncv32u xh = xl+1;
Ncv32f dx = x - xl;
Ncv32u yl = (Ncv32u)y;
Ncv32u yh = yl+1;
Ncv32f dy = y - yl;
T p00, p01, p10, p11;
p00 = refLayer.at(xl, yl);
p01 = xh < refLayer.width() ? refLayer.at(xh, yl) : p00;
p10 = yh < refLayer.height() ? refLayer.at(xl, yh) : p00;
p11 = (xh < refLayer.width() && yh < refLayer.height()) ? refLayer.at(xh, yh) : p00;
typedef typename TConvBase2Vec<Ncv32f, NC(T)>::TVec TVFlt;
TVFlt m_00_01 = _lerp<T, TVFlt>(p00, p01, dx);
TVFlt m_10_11 = _lerp<T, TVFlt>(p10, p11, dx);
TVFlt mixture = _lerp<TVFlt, TVFlt>(m_00_01, m_10_11, dy);
return _pixDemoteClampZ<TVFlt, T>(mixture);
}
template <class T>
NCVImagePyramid<T>::NCVImagePyramid(const NCVMatrix<T> &img,
Ncv8u numLayers,
INCVMemAllocator &alloc,
cudaStream_t cuStream)
{
this->_isInitialized = false;
ncvAssertPrintReturn(img.memType() == alloc.memType(), "NCVImagePyramid::ctor error", );
this->layer0 = &img;
NcvSize32u szLastLayer(img.width(), img.height());
this->nLayers = 1;
NCV_SET_SKIP_COND(alloc.isCounting());
NcvBool bDeviceCode = alloc.memType() == NCVMemoryTypeDevice;
if (numLayers == 0)
{
numLayers = 255; //the loop below cuts off once either dimension reaches 1
}
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
for (Ncv32u i=0; i<(Ncv32u)numLayers-1; i++)
{
NcvSize32u szCurLayer(szLastLayer.width / 2, szLastLayer.height / 2);
if (szCurLayer.width == 0 || szCurLayer.height == 0)
{
break;
}
this->pyramid.push_back(new NCVMatrixAlloc<T>(alloc, szCurLayer.width, szCurLayer.height));
ncvAssertPrintReturn(((NCVMatrixAlloc<T> *)(this->pyramid[i]))->isMemAllocated(), "NCVImagePyramid::ctor error", );
this->nLayers++;
//fill in the layer
NCV_SKIP_COND_BEGIN
const NCVMatrix<T> *prevLayer = i == 0 ? this->layer0 : this->pyramid[i-1];
NCVMatrix<T> *curLayer = this->pyramid[i];
if (bDeviceCode)
{
dim3 bDim(16, 8);
dim3 gDim(divUp(szCurLayer.width, bDim.x), divUp(szCurLayer.height, bDim.y));
kernelDownsampleX2<<<gDim, bDim, 0, cuStream>>>(prevLayer->ptr(),
prevLayer->pitch(),
curLayer->ptr(),
curLayer->pitch(),
szCurLayer);
ncvAssertPrintReturn(cudaSuccess == cudaGetLastError(), "NCVImagePyramid::ctor error", );
#ifdef SELF_CHECK_GPU
NCVMatrixAlloc<T> h_prevLayer(allocCPU, prevLayer->width(), prevLayer->height());
ncvAssertPrintReturn(h_prevLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
NCVMatrixAlloc<T> h_curLayer(allocCPU, curLayer->width(), curLayer->height());
ncvAssertPrintReturn(h_curLayer.isMemAllocated(), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == prevLayer->copy2D(h_prevLayer, prevLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(NCV_SUCCESS == curLayer->copy2D(h_curLayer, curLayer->size(), cuStream), "Validation failure in NCVImagePyramid::ctor", );
ncvAssertPrintReturn(cudaSuccess == cudaStreamSynchronize(cuStream), "Validation failure in NCVImagePyramid::ctor", );
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = h_prevLayer.at(2*j+0, 2*i+0);
T p01 = h_prevLayer.at(2*j+1, 2*i+0);
T p10 = h_prevLayer.at(2*j+0, 2*i+1);
T p11 = h_prevLayer.at(2*j+1, 2*i+1);
T outGold = _average4(p00, p01, p10, p11);
T outGPU = h_curLayer.at(j, i);
ncvAssertPrintReturn(0 == memcmp(&outGold, &outGPU, sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelDownsampleX2", );
}
}
#endif
}
else
{
for (Ncv32u i=0; i<szCurLayer.height; i++)
{
for (Ncv32u j=0; j<szCurLayer.width; j++)
{
T p00 = prevLayer->at(2*j+0, 2*i+0);
T p01 = prevLayer->at(2*j+1, 2*i+0);
T p10 = prevLayer->at(2*j+0, 2*i+1);
T p11 = prevLayer->at(2*j+1, 2*i+1);
curLayer->at(j, i) = _average4(p00, p01, p10, p11);
}
}
}
NCV_SKIP_COND_END
szLastLayer = szCurLayer;
}
this->_isInitialized = true;
}
template <class T>
NCVImagePyramid<T>::~NCVImagePyramid()
{
}
template <class T>
NcvBool NCVImagePyramid<T>::isInitialized() const
{
return this->_isInitialized;
}
template <class T>
NCVStatus NCVImagePyramid<T>::getLayer(NCVMatrix<T> &outImg,
NcvSize32u outRoi,
NcvBool bTrilinear,
cudaStream_t cuStream) const
{
ncvAssertReturn(this->isInitialized(), NCV_UNKNOWN_ERROR);
ncvAssertReturn(outImg.memType() == this->layer0->memType(), NCV_MEM_RESIDENCE_ERROR);
ncvAssertReturn(outRoi.width <= this->layer0->width() && outRoi.height <= this->layer0->height() &&
outRoi.width > 0 && outRoi.height > 0, NCV_DIMENSIONS_INVALID);
if (outRoi.width == this->layer0->width() && outRoi.height == this->layer0->height())
{
ncvAssertReturnNcvStat(this->layer0->copy2D(outImg, NcvSize32u(this->layer0->width(), this->layer0->height()), cuStream));
return NCV_SUCCESS;
}
Ncv32f lastScale = 1.0f;
Ncv32f curScale;
const NCVMatrix<T> *lastLayer = this->layer0;
const NCVMatrix<T> *curLayer = NULL;
NcvBool bUse2Refs = false;
for (Ncv32u i=0; i<this->nLayers-1; i++)
{
curScale = lastScale * 0.5f;
curLayer = this->pyramid[i];
if (outRoi.width == curLayer->width() && outRoi.height == curLayer->height())
{
ncvAssertReturnNcvStat(this->pyramid[i]->copy2D(outImg, NcvSize32u(this->pyramid[i]->width(), this->pyramid[i]->height()), cuStream));
return NCV_SUCCESS;
}
if (outRoi.width >= curLayer->width() && outRoi.height >= curLayer->height())
{
if (outRoi.width < lastLayer->width() && outRoi.height < lastLayer->height())
{
bUse2Refs = true;
}
break;
}
lastScale = curScale;
lastLayer = curLayer;
}
bUse2Refs = bUse2Refs && bTrilinear;
NCV_SET_SKIP_COND(outImg.memType() == NCVMemoryTypeNone);
NcvBool bDeviceCode = this->layer0->memType() == NCVMemoryTypeDevice;
#ifdef SELF_CHECK_GPU
NCVMemNativeAllocator allocCPU(NCVMemoryTypeHostPinned, 512);
#endif
NCV_SKIP_COND_BEGIN
if (bDeviceCode)
{
ncvAssertReturn(bUse2Refs == false, NCV_NOT_IMPLEMENTED);
dim3 bDim(16, 8);
dim3 gDim(divUp(outRoi.width, bDim.x), divUp(outRoi.height, bDim.y));
kernelInterpolateFrom1<<<gDim, bDim, 0, cuStream>>>(lastLayer->ptr(),
lastLayer->pitch(),
lastLayer->size(),
outImg.ptr(),
outImg.pitch(),
outRoi);
ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR);
#ifdef SELF_CHECK_GPU
ncvSafeMatAlloc(h_lastLayer, T, allocCPU, lastLayer->width(), lastLayer->height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvSafeMatAlloc(h_outImg, T, allocCPU, outImg.width(), outImg.height(), NCV_ALLOCATOR_BAD_ALLOC);
ncvAssertReturnNcvStat(lastLayer->copy2D(h_lastLayer, lastLayer->size(), cuStream));
ncvAssertReturnNcvStat(outImg.copy2D(h_outImg, outRoi, cuStream));
ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR);
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T outGold = _interpBilinear(h_lastLayer, ptTopX, ptTopY);
ncvAssertPrintReturn(0 == memcmp(&outGold, &h_outImg.at(j,i), sizeof(T)), "Validation failure in NCVImagePyramid::ctor with kernelInterpolateFrom1", NCV_UNKNOWN_ERROR);
}
}
#endif
}
else
{
for (Ncv32u i=0; i<outRoi.height; i++)
{
for (Ncv32u j=0; j<outRoi.width; j++)
{
//top layer pixel (always exists)
NcvSize32u szTopLayer(lastLayer->width(), lastLayer->height());
Ncv32f ptTopX = 1.0f * (szTopLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptTopY = 1.0f * (szTopLayer.height - 1) * i / (outRoi.height - 1);
T topPix = _interpBilinear(*lastLayer, ptTopX, ptTopY);
T trilinearPix = topPix;
if (bUse2Refs)
{
//bottom layer pixel (exists only if the requested scale is greater than the smallest layer scale)
NcvSize32u szBottomLayer(curLayer->width(), curLayer->height());
Ncv32f ptBottomX = 1.0f * (szBottomLayer.width - 1) * j / (outRoi.width - 1);
Ncv32f ptBottomY = 1.0f * (szBottomLayer.height - 1) * i / (outRoi.height - 1);
T bottomPix = _interpBilinear(*curLayer, ptBottomX, ptBottomY);
Ncv32f scale = (1.0f * outRoi.width / layer0->width() + 1.0f * outRoi.height / layer0->height()) / 2;
Ncv32f dl = (scale - curScale) / (lastScale - curScale);
dl = CLAMP(dl, 0.0f, 1.0f);
trilinearPix = _interpLinear(bottomPix, topPix, dl);
}
outImg.at(j, i) = trilinearPix;
}
}
}
NCV_SKIP_COND_END
return NCV_SUCCESS;
}
template class NCVImagePyramid<uchar1>;
template class NCVImagePyramid<uchar3>;
template class NCVImagePyramid<uchar4>;
template class NCVImagePyramid<ushort1>;
template class NCVImagePyramid<ushort3>;
template class NCVImagePyramid<ushort4>;
template class NCVImagePyramid<uint1>;
template class NCVImagePyramid<uint3>;
template class NCVImagePyramid<uint4>;
template class NCVImagePyramid<float1>;
template class NCVImagePyramid<float3>;
template class NCVImagePyramid<float4>;
#endif //_WIN32
|
d31fa02fc7a0ffefc846848ce2e334b0e3b46394.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Copyright 2016. Phoenix.
*
 * This code was modified to support the Phoenix framework.
*
* We used matrix multiplication to show how Phoenix could be employed to
* extract power values.
*
 * Some parts of the NVIDIA code were changed to customize it for
 * our needs.
*
 * The original source code is at the following location:
* /usr/local/cuda/samples/0_Simple/matrixMul/matrixMul.cu
*
*/
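// Note: PHOENIX_INITIALIZE() and the paired PHOENIX_ENERGY_TIME_START/STOP
// markers used below are assumed to come from phoenix.h; they only delimit
// the region whose time/energy is sampled and do not alter the computation.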
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// includes, project
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <math.h>
#include <fstream>
#include <vector>
#include <iostream>
#include <algorithm>
#include "phoenix.h"
#define N 4096
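// N x N square matrices; at N = 4096 each float matrix occupies 4096 * 4096 * 4 bytes = 64 MiB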
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
hipDeviceSynchronize();
printf("done\n");
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 100;
for (int j = 0; j < nIter; j++)
{
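// Phoenix markers wrap each launch so per-iteration energy/time of the compute region is recorded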
PHOENIX_ENERGY_TIME_START(1, "region_compute");
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
PHOENIX_ENERGY_TIME_STOP(1, "region_compute");
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
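// Each of the dimsA.y * dimsB.x output elements needs dimsA.x multiply-add pairs, hence the factor of 2.0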
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err / abs_val / dot_length;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
PHOENIX_INITIALIZE();
printf("[Matrix Multiply Using CUDA] - Starting...\n");
int devID = 0;
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
dimsA.x = N;
// height of Matrix A
dimsA.y = N;
// width of Matrix B
dimsB.x = N;
// height of Matrix B
dimsB.y = N;
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| d31fa02fc7a0ffefc846848ce2e334b0e3b46394.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Copyright 2016. Phoenix.
*
 * This code was modified to support the Phoenix framework.
*
* We used matrix multiplication to show how Phoenix could be employed to
* extract power values.
*
 * Some parts of the NVIDIA code were changed to customize it for
 * our needs.
*
 * The original source code is at the following location:
* /usr/local/cuda/samples/0_Simple/matrixMul/matrixMul.cu
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// includes, project
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <assert.h>
#include <math.h>
#include <fstream>
#include <vector>
#include <iostream>
#include <algorithm>
#include "phoenix.h"
#define N 4096
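// N x N square matrices; at N = 4096 each float matrix occupies 4096 * 4096 * 4 bytes = 64 MiB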
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
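// Work decomposition note: each thread block computes one BLOCK_SIZE x BLOCK_SIZE
// tile of C, iterating over wA/BLOCK_SIZE pairs of shared-memory tiles of A and B,
// and each thread accumulates exactly one element of that tile in Csub.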
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaDeviceSynchronize();
printf("done\n");
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 100;
for (int j = 0; j < nIter; j++)
{
PHOENIX_ENERGY_TIME_START(1, "region_compute");
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
PHOENIX_ENERGY_TIME_STOP(1, "region_compute");
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
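    // With A filled with 1.0 and B filled with valB, every element of C should
    // equal dimsA.x * valB (here 4096 * 0.01 = 40.96), so abs_err measures the
    // deviation from that reference value.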
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
int main(int argc, char **argv)
{
PHOENIX_INITIALIZE();
printf("[Matrix Multiply Using CUDA] - Starting...\n");
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
dimsA.x = N;
// height of Matrix A
dimsA.y = N;
// width of Matrix B
dimsB.x = N;
// height of Matrix B
dimsB.y = N;
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
0bf52308e668eb356713aedb51b765b1cf5da162.hip | // !!! This is a file automatically generated by hipify!!!
/*
* md2.cu CUDA Implementation of MD2 digest
*
* Date: 12 June 2019
* Revision: 1
*
* Based on the public domain Reference Implementation in C, by
* Brad Conte, original code here:
*
* https://github.com/B-Con/crypto-algorithms
*
* This file is released into the Public Domain.
*/
/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <memory.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include "md2.cuh"
/**************************** STRUCT ********************************/
typedef struct {
BYTE data[16];
BYTE state[48];
BYTE checksum[16];
int len;
} CUDA_MD2_CTX;
/**************************** VARIABLES *****************************/
__constant__ BYTE s[256] = {
41, 46, 67, 201, 162, 216, 124, 1, 61, 54, 84, 161, 236, 240, 6,
19, 98, 167, 5, 243, 192, 199, 115, 140, 152, 147, 43, 217, 188,
76, 130, 202, 30, 155, 87, 60, 253, 212, 224, 22, 103, 66, 111, 24,
138, 23, 229, 18, 190, 78, 196, 214, 218, 158, 222, 73, 160, 251,
245, 142, 187, 47, 238, 122, 169, 104, 121, 145, 21, 178, 7, 63,
148, 194, 16, 137, 11, 34, 95, 33, 128, 127, 93, 154, 90, 144, 50,
39, 53, 62, 204, 231, 191, 247, 151, 3, 255, 25, 48, 179, 72, 165,
181, 209, 215, 94, 146, 42, 172, 86, 170, 198, 79, 184, 56, 210,
150, 164, 125, 182, 118, 252, 107, 226, 156, 116, 4, 241, 69, 157,
112, 89, 100, 113, 135, 32, 134, 91, 207, 101, 230, 45, 168, 2, 27,
96, 37, 173, 174, 176, 185, 246, 28, 70, 97, 105, 52, 64, 126, 15,
85, 71, 163, 35, 221, 81, 175, 58, 195, 92, 249, 206, 186, 197,
234, 38, 44, 83, 13, 110, 133, 40, 132, 9, 211, 223, 205, 244, 65,
129, 77, 82, 106, 220, 55, 200, 108, 193, 171, 250, 36, 225, 123,
8, 12, 189, 177, 74, 120, 136, 149, 139, 227, 99, 232, 109, 233,
203, 213, 254, 59, 0, 29, 57, 242, 239, 183, 14, 102, 88, 208, 228,
166, 119, 114, 248, 235, 117, 75, 10, 49, 68, 80, 180, 143, 237,
31, 26, 219, 153, 141, 51, 159, 17, 131, 20
};
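/* Note: s[] is the fixed MD2 S-box from RFC 1319, a permutation of 0..255
   (described there as constructed from the digits of pi). */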
/*********************** FUNCTION DEFINITIONS ***********************/
__device__ void cuda_md2_transform(CUDA_MD2_CTX *ctx, BYTE data[])
{
int j,k,t;
//memcpy(&ctx->state[16], data);
for (j=0; j < 16; ++j) {
ctx->state[j + 16] = data[j];
ctx->state[j + 32] = (ctx->state[j+16] ^ ctx->state[j]);
}
t = 0;
for (j = 0; j < 18; ++j) {
for (k = 0; k < 48; ++k) {
ctx->state[k] ^= s[t];
t = ctx->state[k];
}
t = (t+j) & 0xFF;
}
t = ctx->checksum[15];
for (j=0; j < 16; ++j) {
ctx->checksum[j] ^= s[data[j] ^ t];
t = ctx->checksum[j];
}
}
__device__ void cuda_md2_init(CUDA_MD2_CTX *ctx)
{
int i;
for (i=0; i < 48; ++i)
ctx->state[i] = 0;
for (i=0; i < 16; ++i)
ctx->checksum[i] = 0;
ctx->len = 0;
}
__device__ void cuda_md2_update(CUDA_MD2_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->len] = data[i];
ctx->len++;
if (ctx->len == MD2_BLOCK_SIZE) {
cuda_md2_transform(ctx, ctx->data);
ctx->len = 0;
}
}
}
__device__ void cuda_md2_final(CUDA_MD2_CTX *ctx, BYTE hash[])
{
int to_pad;
to_pad = MD2_BLOCK_SIZE - ctx->len;
while (ctx->len < MD2_BLOCK_SIZE)
ctx->data[ctx->len++] = to_pad;
cuda_md2_transform(ctx, ctx->data);
cuda_md2_transform(ctx, ctx->checksum);
memcpy(hash, ctx->state, MD2_BLOCK_SIZE);
}
__global__ void kernel_md2_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch)
{
WORD thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread >= n_batch)
{
return;
}
BYTE* in = indata + thread * inlen;
BYTE* out = outdata + thread * MD2_BLOCK_SIZE;
CUDA_MD2_CTX ctx;
cuda_md2_init(&ctx);
cuda_md2_update(&ctx, in, inlen);
cuda_md2_final(&ctx, out);
}
void mcm_cuda_md2_hash_batch(BYTE *in, WORD inlen, BYTE *out, WORD n_batch, WORD n_iter) {
BYTE *cuda_indata;
BYTE *cuda_outdata;
hipMalloc(&cuda_indata, inlen * n_batch);
hipMalloc(&cuda_outdata, MD2_BLOCK_SIZE * n_batch);
hipMemcpy(cuda_indata, in, inlen * n_batch, hipMemcpyHostToDevice);
WORD thread = WG_SIZE;
WORD block = (n_batch / thread) + (n_batch % thread != 0);
for(int i = 0 ; i < n_iter ; ++i)
kernel_md2_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch);
hipMemcpy(out, cuda_outdata, MD2_BLOCK_SIZE * n_batch, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("Error cuda md2 hash: %s \n", hipGetErrorString(error));
}
hipFree(cuda_indata);
hipFree(cuda_outdata);
}
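/* Usage sketch (illustrative only; the host buffers below are hypothetical and
   not part of this file):
     BYTE msgs[2 * 64];                 // two 64-byte messages stored back to back
     BYTE digests[2 * MD2_BLOCK_SIZE];  // one 16-byte digest per message
     mcm_cuda_md2_hash_batch(msgs, 64, digests, 2, 1);
*/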
| 0bf52308e668eb356713aedb51b765b1cf5da162.cu | /*
* md2.cu CUDA Implementation of MD2 digest
*
* Date: 12 June 2019
* Revision: 1
*
* Based on the public domain Reference Implementation in C, by
* Brad Conte, original code here:
*
* https://github.com/B-Con/crypto-algorithms
*
* This file is released into the Public Domain.
*/
/*************************** HEADER FILES ***************************/
#include <stdlib.h>
#include <memory.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include "md2.cuh"
/**************************** STRUCT ********************************/
typedef struct {
BYTE data[16];
BYTE state[48];
BYTE checksum[16];
int len;
} CUDA_MD2_CTX;
/**************************** VARIABLES *****************************/
__constant__ BYTE s[256] = {
41, 46, 67, 201, 162, 216, 124, 1, 61, 54, 84, 161, 236, 240, 6,
19, 98, 167, 5, 243, 192, 199, 115, 140, 152, 147, 43, 217, 188,
76, 130, 202, 30, 155, 87, 60, 253, 212, 224, 22, 103, 66, 111, 24,
138, 23, 229, 18, 190, 78, 196, 214, 218, 158, 222, 73, 160, 251,
245, 142, 187, 47, 238, 122, 169, 104, 121, 145, 21, 178, 7, 63,
148, 194, 16, 137, 11, 34, 95, 33, 128, 127, 93, 154, 90, 144, 50,
39, 53, 62, 204, 231, 191, 247, 151, 3, 255, 25, 48, 179, 72, 165,
181, 209, 215, 94, 146, 42, 172, 86, 170, 198, 79, 184, 56, 210,
150, 164, 125, 182, 118, 252, 107, 226, 156, 116, 4, 241, 69, 157,
112, 89, 100, 113, 135, 32, 134, 91, 207, 101, 230, 45, 168, 2, 27,
96, 37, 173, 174, 176, 185, 246, 28, 70, 97, 105, 52, 64, 126, 15,
85, 71, 163, 35, 221, 81, 175, 58, 195, 92, 249, 206, 186, 197,
234, 38, 44, 83, 13, 110, 133, 40, 132, 9, 211, 223, 205, 244, 65,
129, 77, 82, 106, 220, 55, 200, 108, 193, 171, 250, 36, 225, 123,
8, 12, 189, 177, 74, 120, 136, 149, 139, 227, 99, 232, 109, 233,
203, 213, 254, 59, 0, 29, 57, 242, 239, 183, 14, 102, 88, 208, 228,
166, 119, 114, 248, 235, 117, 75, 10, 49, 68, 80, 180, 143, 237,
31, 26, 219, 153, 141, 51, 159, 17, 131, 20
};
/*********************** FUNCTION DEFINITIONS ***********************/
__device__ void cuda_md2_transform(CUDA_MD2_CTX *ctx, BYTE data[])
{
int j,k,t;
//memcpy(&ctx->state[16], data);
for (j=0; j < 16; ++j) {
ctx->state[j + 16] = data[j];
ctx->state[j + 32] = (ctx->state[j+16] ^ ctx->state[j]);
}
t = 0;
for (j = 0; j < 18; ++j) {
for (k = 0; k < 48; ++k) {
ctx->state[k] ^= s[t];
t = ctx->state[k];
}
t = (t+j) & 0xFF;
}
t = ctx->checksum[15];
for (j=0; j < 16; ++j) {
ctx->checksum[j] ^= s[data[j] ^ t];
t = ctx->checksum[j];
}
}
__device__ void cuda_md2_init(CUDA_MD2_CTX *ctx)
{
int i;
for (i=0; i < 48; ++i)
ctx->state[i] = 0;
for (i=0; i < 16; ++i)
ctx->checksum[i] = 0;
ctx->len = 0;
}
__device__ void cuda_md2_update(CUDA_MD2_CTX *ctx, const BYTE data[], size_t len)
{
size_t i;
for (i = 0; i < len; ++i) {
ctx->data[ctx->len] = data[i];
ctx->len++;
if (ctx->len == MD2_BLOCK_SIZE) {
cuda_md2_transform(ctx, ctx->data);
ctx->len = 0;
}
}
}
__device__ void cuda_md2_final(CUDA_MD2_CTX *ctx, BYTE hash[])
{
int to_pad;
to_pad = MD2_BLOCK_SIZE - ctx->len;
while (ctx->len < MD2_BLOCK_SIZE)
ctx->data[ctx->len++] = to_pad;
cuda_md2_transform(ctx, ctx->data);
cuda_md2_transform(ctx, ctx->checksum);
memcpy(hash, ctx->state, MD2_BLOCK_SIZE);
}
__global__ void kernel_md2_hash(BYTE* indata, WORD inlen, BYTE* outdata, WORD n_batch)
{
WORD thread = blockIdx.x * blockDim.x + threadIdx.x;
if (thread >= n_batch)
{
return;
}
BYTE* in = indata + thread * inlen;
BYTE* out = outdata + thread * MD2_BLOCK_SIZE;
CUDA_MD2_CTX ctx;
cuda_md2_init(&ctx);
cuda_md2_update(&ctx, in, inlen);
cuda_md2_final(&ctx, out);
}
void mcm_cuda_md2_hash_batch(BYTE *in, WORD inlen, BYTE *out, WORD n_batch, WORD n_iter) {
BYTE *cuda_indata;
BYTE *cuda_outdata;
cudaMalloc(&cuda_indata, inlen * n_batch);
cudaMalloc(&cuda_outdata, MD2_BLOCK_SIZE * n_batch);
cudaMemcpy(cuda_indata, in, inlen * n_batch, cudaMemcpyHostToDevice);
WORD thread = WG_SIZE;
WORD block = (n_batch / thread) + (n_batch % thread != 0);
for(int i = 0 ; i < n_iter ; ++i)
kernel_md2_hash << < block, thread >> > (cuda_indata, inlen, cuda_outdata, n_batch);
cudaMemcpy(out, cuda_outdata, MD2_BLOCK_SIZE * n_batch, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Error cuda md2 hash: %s \n", cudaGetErrorString(error));
}
cudaFree(cuda_indata);
cudaFree(cuda_outdata);
}
|
205dc5608fea5be063604b6a6d9b773a9bcc1138.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix.hh"
#include "layers.hh"
// Tested, more or less
void Matrix::print_matrix(){
copyD2H();
printf("fil : %lu, col : %lu\n", shape.x, shape.y);
for(int i=0; i< shape.x; i++)
{
for(int j=0; j< shape.y; j++)
{
printf("%f ", this->data_h.get()[i*shape.y+j]);
}
printf("\n");
}
}
// ---------- SHAPE -----------
// Shape constructor so the compiler does not leave the members uninitialized
// since x and y are size_t members of Shape, they can be initialized in the member initializer list
Shape::Shape(size_t x, size_t y) :
x(x), y(y)
{ }
// ---------- MATRIX -----------
// constructor functions
// the matrix starts with every attribute empty
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_d(nullptr), data_h(nullptr),
d_allocated(false), h_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
// host and device memory allocation methods
void Matrix::allocateMemory()
{
allocateHostMemory();
allocateDevMemory();
}
// allocate memory if we do not have it yet
void Matrix::allocateMemoryIfNAll(Shape shape)
{
	// check whether memory is already allocated; if not, set the shape and allocate
if (!d_allocated && !h_allocated)
{
this -> shape = shape;
allocateMemory();
}
}
// method that allocates the matrix in host memory
void Matrix::allocateHostMemory()
{
if (!h_allocated) {
		// hook up the host memory
		// [&] captures all in-scope variables by reference / the lambda deletes the pointed-to memory when the matrix is destroyed
		data_h = std::shared_ptr<double>(new double[shape.x * shape.y], [&](double * ptr){delete[] ptr; });
		memset(data_h.get(), 0, shape.x * shape.y * sizeof(double));
		// record that host memory has been allocated
h_allocated = true;
}
}
void Matrix::allocateDevMemory()
{
if (!d_allocated)
{
double * device_memory = nullptr;
		// allocate memory on the GPU and check for errors
		hipMalloc(&device_memory, shape.x*shape.y*sizeof(double));
		NNExc::thIfDevErr("Unable to allocate memory for the tensor");
		hipMemset(device_memory, 0, shape.x*shape.y*sizeof(double));
		// again, hand the smart pointer its destruction method
data_d = std::shared_ptr<double>(device_memory,
[&](double *ptr){ hipFree(ptr); });
d_allocated = true;
}
}
// method that copies memory from host to device
void Matrix::copyH2D() {
	// check that host and device memory are allocated
	if (d_allocated && h_allocated) {
		hipMemcpy(data_d.get(), data_h.get(), shape.x*shape.y*sizeof(double),hipMemcpyHostToDevice);
		NNExc::thIfDevErr("Unable to copy data from host to device\n");
	}
	else {
		// no memory allocated, throw
		NNExc::thIfDevErr("Cannot copy data because memory has not been allocated : H2D");
}
}
// method that copies memory from device to host
void Matrix::copyD2H() {
	// check that host and device memory are allocated
	if (d_allocated && h_allocated) {
		hipMemcpy(data_h.get(), data_d.get(), shape.x*shape.y*sizeof(double),hipMemcpyDeviceToHost);
		NNExc::thIfDevErr("Unable to copy data from device to host");
	}
	else {
		// no memory allocated, throw to report the error
		NNExc::thIfDevErr("Cannot copy data because memory has not been allocated : D2H");
}
}
// equivalent to __get__ on the host matrix (access an element by index)
double& Matrix::operator[](const int idx){
return data_h.get()[idx];
}
const double& Matrix::operator[](const int idx) const{
return data_h.get()[idx];
} | 205dc5608fea5be063604b6a6d9b773a9bcc1138.cu | #include "matrix.hh"
#include "layers.hh"
// Tested, more or less
void Matrix::print_matrix(){
copyD2H();
printf("fil : %lu, col : %lu\n", shape.x, shape.y);
for(int i=0; i< shape.x; i++)
{
for(int j=0; j< shape.y; j++)
{
printf("%f ", this->data_h.get()[i*shape.y+j]);
}
printf("\n");
}
}
// ---------- SHAPE -----------
// Shape constructor so the compiler does not leave the members uninitialized
// since x and y are size_t members of Shape, they can be initialized in the member initializer list
Shape::Shape(size_t x, size_t y) :
x(x), y(y)
{ }
// ---------- MATRIX -----------
// constructor functions
// the matrix starts with every attribute empty
Matrix::Matrix(size_t x_dim, size_t y_dim) :
shape(x_dim, y_dim), data_d(nullptr), data_h(nullptr),
d_allocated(false), h_allocated(false)
{ }
Matrix::Matrix(Shape shape) :
Matrix(shape.x, shape.y)
{ }
// host and device memory allocation methods
void Matrix::allocateMemory()
{
allocateHostMemory();
allocateDevMemory();
}
// allocate memory if we do not have it yet
void Matrix::allocateMemoryIfNAll(Shape shape)
{
	// check whether memory is already allocated; if not, set the shape and allocate
if (!d_allocated && !h_allocated)
{
this -> shape = shape;
allocateMemory();
}
}
// method that allocates the matrix in host memory
void Matrix::allocateHostMemory()
{
if (!h_allocated) {
		// hook up the host memory
		// [&] captures all in-scope variables by reference / the lambda deletes the pointed-to memory when the matrix is destroyed
		data_h = std::shared_ptr<double>(new double[shape.x * shape.y], [&](double * ptr){delete[] ptr; });
		memset(data_h.get(), 0, shape.x * shape.y * sizeof(double));
		// record that host memory has been allocated
h_allocated = true;
}
}
void Matrix::allocateDevMemory()
{
if (!d_allocated)
{
double * device_memory = nullptr;
		// allocate memory on the GPU and check for errors
		cudaMalloc(&device_memory, shape.x*shape.y*sizeof(double));
		NNExc::thIfDevErr("Unable to allocate memory for the tensor");
		cudaMemset(device_memory, 0, shape.x*shape.y*sizeof(double));
		// again, hand the smart pointer its destruction method
data_d = std::shared_ptr<double>(device_memory,
[&](double *ptr){ cudaFree(ptr); });
d_allocated = true;
}
}
// method that copies memory from host to device
void Matrix::copyH2D() {
	// check that host and device memory are allocated
	if (d_allocated && h_allocated) {
		cudaMemcpy(data_d.get(), data_h.get(), shape.x*shape.y*sizeof(double),cudaMemcpyHostToDevice);
		NNExc::thIfDevErr("Unable to copy data from host to device\n");
	}
	else {
		// no memory allocated, throw
		NNExc::thIfDevErr("Cannot copy data because memory has not been allocated : H2D");
}
}
// method that copies memory from device to host
void Matrix::copyD2H() {
	// check that host and device memory are allocated
	if (d_allocated && h_allocated) {
		cudaMemcpy(data_h.get(), data_d.get(), shape.x*shape.y*sizeof(double),cudaMemcpyDeviceToHost);
		NNExc::thIfDevErr("Unable to copy data from device to host");
	}
	else {
		// no memory allocated, throw to report the error
		NNExc::thIfDevErr("Cannot copy data because memory has not been allocated : D2H");
}
}
// equivalent to __get__ on the host matrix (access an element by index)
double& Matrix::operator[](const int idx){
return data_h.get()[idx];
}
const double& Matrix::operator[](const int idx) const{
return data_h.get()[idx];
} |
6f3de7e232ecbcfbfadc401eceae874dfe8af926.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 16
#define THREAD_PER_BLOCK_SIDE_Y 32
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"
__global__ void transpose(TYPE * in, TYPE * out, int size)
{
//int temp_side = THREAD_PER_BLOCK;
__shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
// copy submatrix (transposed) in shared memory
temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];
__syncthreads();
// copy submatrix in main memory
out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}
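// Note: a common refinement of this shared-memory transpose (not applied here) pads
// the tile, e.g. __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y + 1],
// so that the strided shared-memory accesses avoid bank conflicts.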
int correct(TYPE* a, TYPE* b, int side)
{
int i;
for(i=0; i<side*side; i++)
if(a[i]!=b[(i%side)*side + i/side]) return 0;
return 1;
}
int main()
{
TYPE * h_in, * h_out;
TYPE * d_in, * d_out;
int size = N*N;
int size_in_memory = size * sizeof(TYPE);
int i;
// timing
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
//allocate memory in host and device
h_in = (TYPE *)malloc(size_in_memory);
h_out = (TYPE *)malloc(size_in_memory);
hipMalloc((void**)&d_in, size_in_memory);
hipMalloc((void**)&d_out, size_in_memory);
//fill matrix in host
for(i = 0; i<size; i++)
h_in[i] = i;
//transfer matrix from host to device
hipMemcpy(d_in, h_in, size_in_memory, hipMemcpyHostToDevice);
//transpose matrix in device
dim3 grid, block;
block.x = THREAD_PER_BLOCK_SIDE_X;
block.y = THREAD_PER_BLOCK_SIDE_Y;
grid.x = N / block.x;
grid.y = N / block.y;
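	// With N = 8192 and a 16 x 32 block this yields a 512 x 256 grid
	// (grid.x = N/block.x, grid.y = N/block.y); N is assumed divisible by both.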
hipEventRecord(start);
hipLaunchKernelGGL(( transpose), dim3(grid), dim3(block) , 0, 0, d_in, d_out, N);
hipEventRecord(stop);
// transfer matrix from device to host
hipMemcpy(h_out, d_out, size_in_memory, hipMemcpyDeviceToHost);
// correctness test
printf("\ncorrecteness: %d \n", correct(h_in, h_out, N));
//free memory
free(h_in);
free(h_out);
hipFree(d_in);
hipFree(d_out);
//showing Bandwidth
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("\nmatrix type: %s", TYPE_S);
printf("\nblock: %d x %d", block.y, block.x);
printf("\nmilliseconds: %f", milliseconds);
printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);
return 0;
}
| 6f3de7e232ecbcfbfadc401eceae874dfe8af926.cu | #include <stdio.h>
#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 16
#define THREAD_PER_BLOCK_SIDE_Y 32
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"
__global__ void transpose(TYPE * in, TYPE * out, int size)
{
//int temp_side = THREAD_PER_BLOCK;
__shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
// copy submatrix (transposed) in shared memory
temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];
__syncthreads();
// copy submatrix in main memory
out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}
int correct(TYPE* a, TYPE* b, int side)
{
int i;
for(i=0; i<side*side; i++)
if(a[i]!=b[(i%side)*side + i/side]) return 0;
return 1;
}
int main()
{
TYPE * h_in, * h_out;
TYPE * d_in, * d_out;
int size = N*N;
int size_in_memory = size * sizeof(TYPE);
int i;
// timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//allocate memory in host and device
h_in = (TYPE *)malloc(size_in_memory);
h_out = (TYPE *)malloc(size_in_memory);
cudaMalloc((void**)&d_in, size_in_memory);
cudaMalloc((void**)&d_out, size_in_memory);
//fill matrix in host
for(i = 0; i<size; i++)
h_in[i] = i;
//transfer matrix from host to device
cudaMemcpy(d_in, h_in, size_in_memory, cudaMemcpyHostToDevice);
//transpose matrix in device
dim3 grid, block;
block.x = THREAD_PER_BLOCK_SIDE_X;
block.y = THREAD_PER_BLOCK_SIDE_Y;
grid.x = N / block.x;
grid.y = N / block.y;
cudaEventRecord(start);
transpose<<< grid, block >>>(d_in, d_out, N);
cudaEventRecord(stop);
// transfer matrix from device to host
cudaMemcpy(h_out, d_out, size_in_memory, cudaMemcpyDeviceToHost);
// correctness test
printf("\ncorrecteness: %d \n", correct(h_in, h_out, N));
//free memory
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
//showing Bandwidth
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("\nmatrix type: %s", TYPE_S);
printf("\nblock: %d x %d", block.y, block.x);
printf("\nmilliseconds: %f", milliseconds);
printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);
return 0;
}
|
89275a05f62e4838fea83a3ab658215ea498fdff.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/gather.hpp>
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <cmath>
namespace cudf {
namespace experimental {
namespace detail {
/*
* unique_copy copies elements from the range [first, last) to a range beginning
* with output, except that in a consecutive group of duplicate elements only
* depending on last argument keep, only the first one is copied, or the last
* one is copied or neither is copied. The return value is the end of the range
* to which the elements are copied.
*/
template<typename Exec,
typename InputIterator,
typename OutputIterator,
typename BinaryPredicate>
OutputIterator unique_copy(Exec&& exec,
InputIterator first,
InputIterator last,
OutputIterator output,
BinaryPredicate comp,
const duplicate_keep_option keep)
{
size_type last_index = thrust::distance(first,last)-1;
if (keep == duplicate_keep_option::KEEP_NONE) {
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index] __device__ (size_type i) {
return (i == 0 || !comp(first[i], first[i-1]))
&& (i == last_index || !comp(first[i], first[i+1]));
});
} else {
size_type offset = 1;
if (keep == duplicate_keep_option::KEEP_FIRST) {
last_index = 0;
offset = -1;
}
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index, offset] __device__ (size_type i) {
return (i == last_index || !comp(first[i], first[i+offset]));
});
}
}
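/* Example of the keep semantics: for already-sorted rows {A, A, B, C, C},
 * KEEP_FIRST copies positions {0, 2, 3}, KEEP_LAST copies {1, 2, 4}, and
 * KEEP_NONE copies only {2}, since B is the only row without a duplicate. */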
/**
* @brief Create a column_view of index values which represent the row values
* without duplicates as per @p `keep`
*
* Given a `keys` table_view, each row index is copied to output `unique_indices`, if the corresponding
* row of `keys` table_view is unique, where the definition of unique depends on the value of @p keep:
* - KEEP_FIRST: only the first of a sequence of duplicate rows is copied
* - KEEP_LAST: only the last of a sequence of duplicate rows is copied
* - KEEP_NONE: only unique rows are kept
*
* @param[in] keys table_view to identify duplicate rows
* @param[out] unique_indices Column to store the index with unique rows
* @param[in] keep keep first entry, last entry, or no entries if duplicates found
* @param[in] nulls_are_equal flag to denote nulls are equal if true,
* nulls are not equal if false
* @param[in] stream Optional CUDA stream on which to execute kernels
*
* @return column_view column_view of unique row index as per specified `keep`, this is actually slice of `unique_indices`.
*/
column_view get_unique_ordered_indices(cudf::table_view const& keys,
cudf::mutable_column_view & unique_indices,
duplicate_keep_option const& keep,
bool const& nulls_are_equal = true,
hipStream_t stream=0)
{
// sort only indices
auto sorted_indices = sorted_order(keys,
std::vector<order>{},
std::vector<null_order>{},
rmm::mr::get_default_resource(),
stream);
// extract unique indices
auto device_input_table = cudf::table_device_view::create(keys, stream);
if(cudf::has_nulls(keys)) {
auto comp = row_equality_comparator<true>(*device_input_table,
*device_input_table,
nulls_are_equal);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::experimental::detail::slice(column_view(unique_indices), 0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
} else {
auto comp = row_equality_comparator<false>(*device_input_table,
*device_input_table,
nulls_are_equal);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::experimental::detail::slice(column_view(unique_indices), 0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
}
}
cudf::size_type unique_count(table_view const& keys,
bool const& nulls_are_equal = true,
hipStream_t stream=0)
{
// sort only indices
auto sorted_indices = sorted_order(keys,
std::vector<order>{},
std::vector<null_order>{},
rmm::mr::get_default_resource(),
stream);
// count unique elements
auto sorted_row_index = sorted_indices->view().data<cudf::size_type>();
auto device_input_table = cudf::table_device_view::create(keys, stream);
if(cudf::has_nulls(keys)) {
row_equality_comparator<true> comp (*device_input_table,
*device_input_table,
nulls_are_equal);
return thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp]
__device__ (cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i-1]));
});
} else {
row_equality_comparator<false> comp(*device_input_table,
*device_input_table,
nulls_are_equal);
return thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp]
__device__ (cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i-1]));
});
}
}
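// Example: for key rows {A, B, A} the sort groups the duplicate rows together and
// the predicate above counts 2 distinct rows (position 0 plus the single position
// where a sorted row differs from its predecessor).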
std::unique_ptr<experimental::table>
drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const& keep,
bool const& nulls_are_equal,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (0 == input.num_rows() ||
0 == input.num_columns() ||
0 == keys.size()
) {
return experimental::empty_like(input);
}
auto keys_view = input.select(keys);
// The values will be filled into this column
auto unique_indices =
cudf::make_numeric_column(data_type{INT32},
keys_view.num_rows(), mask_state::UNALLOCATED, stream, mr);
auto mutable_unique_indices_view = unique_indices->mutable_view();
// This is just slice of `unique_indices` but with different size as per the
// keys_view has been processed in `get_unique_ordered_indices`
auto unique_indices_view =
detail::get_unique_ordered_indices(keys_view,
mutable_unique_indices_view,
keep, nulls_are_equal,
stream);
// run gather operation to establish new order
return detail::gather(input, unique_indices_view, false, false, false, mr, stream);
}
cudf::size_type unique_count(column_view const& input,
bool const& ignore_nulls,
bool const& nan_as_null,
hipStream_t stream)
{
if (0 == input.size() || input.null_count() == input.size()) {
return 0;
}
cudf::size_type nrows = input.size();
bool has_nan = false;
// Check for Nans
// Checking for nulls in input and flag nan_as_null, as the count will
// only get affected if these two conditions are true. NAN will only be
  // an extra if nan_as_null was true and input also had null, which
// will increase the count by 1.
if(input.has_nulls() and nan_as_null){
has_nan = cudf::experimental::type_dispatcher(input.type(), has_nans{}, input, stream);
}
auto count = detail::unique_count(table_view{{input}}, true, stream);
// if nan is considered null and there are already null values
if (nan_as_null and has_nan and input.has_nulls())
--count;
if(ignore_nulls and input.has_nulls())
return --count;
else
return count;
}
}// namespace detail
std::unique_ptr<experimental::table>
drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const& keep,
bool const& nulls_are_equal,
rmm::mr::device_memory_resource* mr) {
return detail::drop_duplicates(input, keys, keep, nulls_are_equal, mr);
}
cudf::size_type unique_count(column_view const& input,
bool const& ignore_nulls,
bool const& nan_as_null,
rmm::mr::device_memory_resource *mr) {
return detail::unique_count(input, ignore_nulls, nan_as_null);
}
}// namespace experimental
}// namespace cudf
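// Usage sketch (illustrative; assumes an existing cudf::table_view `tbl` and that
// duplicate_keep_option is declared in namespace cudf::experimental):
//   auto deduped = cudf::experimental::drop_duplicates(
//       tbl, std::vector<cudf::size_type>{0},
//       cudf::experimental::duplicate_keep_option::KEEP_FIRST,
//       true, rmm::mr::get_default_resource());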
| 89275a05f62e4838fea83a3ab658215ea498fdff.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/types.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/stream_compaction.hpp>
#include <cudf/detail/stream_compaction.hpp>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/gather.hpp>
#include <thrust/logical.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <cmath>
namespace cudf {
namespace experimental {
namespace detail {
/*
* unique_copy copies elements from the range [first, last) to a range beginning
* with output, except that in a consecutive group of duplicate elements only
* depending on last argument keep, only the first one is copied, or the last
* one is copied or neither is copied. The return value is the end of the range
* to which the elements are copied.
*/
template<typename Exec,
typename InputIterator,
typename OutputIterator,
typename BinaryPredicate>
OutputIterator unique_copy(Exec&& exec,
InputIterator first,
InputIterator last,
OutputIterator output,
BinaryPredicate comp,
const duplicate_keep_option keep)
{
size_type last_index = thrust::distance(first,last)-1;
if (keep == duplicate_keep_option::KEEP_NONE) {
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index] __device__ (size_type i) {
return (i == 0 || !comp(first[i], first[i-1]))
&& (i == last_index || !comp(first[i], first[i+1]));
});
} else {
size_type offset = 1;
if (keep == duplicate_keep_option::KEEP_FIRST) {
last_index = 0;
offset = -1;
}
return thrust::copy_if(exec,
first,
last,
thrust::counting_iterator<size_type>(0),
output,
[first, comp, last_index, offset] __device__ (size_type i) {
return (i == last_index || !comp(first[i], first[i+offset]));
});
}
}
/**
* @brief Create a column_view of index values which represent the row values
* without duplicates as per @p `keep`
*
* Given a `keys` table_view, each row index is copied to output `unique_indices`, if the corresponding
* row of `keys` table_view is unique, where the definition of unique depends on the value of @p keep:
* - KEEP_FIRST: only the first of a sequence of duplicate rows is copied
* - KEEP_LAST: only the last of a sequence of duplicate rows is copied
* - KEEP_NONE: only unique rows are kept
*
* @param[in] keys table_view to identify duplicate rows
* @param[out] unique_indices Column to store the index with unique rows
* @param[in] keep keep first entry, last entry, or no entries if duplicates found
* @param[in] nulls_are_equal flag to denote nulls are equal if true,
* nulls are not equal if false
* @param[in] stream Optional CUDA stream on which to execute kernels
*
* @return column_view column_view of unique row index as per specified `keep`, this is actually slice of `unique_indices`.
*/
column_view get_unique_ordered_indices(cudf::table_view const& keys,
cudf::mutable_column_view & unique_indices,
duplicate_keep_option const& keep,
bool const& nulls_are_equal = true,
cudaStream_t stream=0)
{
// sort only indices
auto sorted_indices = sorted_order(keys,
std::vector<order>{},
std::vector<null_order>{},
rmm::mr::get_default_resource(),
stream);
// extract unique indices
auto device_input_table = cudf::table_device_view::create(keys, stream);
if(cudf::has_nulls(keys)) {
auto comp = row_equality_comparator<true>(*device_input_table,
*device_input_table,
nulls_are_equal);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::experimental::detail::slice(column_view(unique_indices), 0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
} else {
auto comp = row_equality_comparator<false>(*device_input_table,
*device_input_table,
nulls_are_equal);
auto result_end = unique_copy(rmm::exec_policy(stream)->on(stream),
sorted_indices->view().begin<cudf::size_type>(),
sorted_indices->view().end<cudf::size_type>(),
unique_indices.begin<cudf::size_type>(),
comp,
keep);
return cudf::experimental::detail::slice(column_view(unique_indices), 0,
thrust::distance(unique_indices.begin<cudf::size_type>(), result_end));
}
}
cudf::size_type unique_count(table_view const& keys,
bool const& nulls_are_equal = true,
cudaStream_t stream=0)
{
// sort only indices
auto sorted_indices = sorted_order(keys,
std::vector<order>{},
std::vector<null_order>{},
rmm::mr::get_default_resource(),
stream);
// count unique elements
auto sorted_row_index = sorted_indices->view().data<cudf::size_type>();
auto device_input_table = cudf::table_device_view::create(keys, stream);
if(cudf::has_nulls(keys)) {
row_equality_comparator<true> comp (*device_input_table,
*device_input_table,
nulls_are_equal);
return thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp]
__device__ (cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i-1]));
});
} else {
row_equality_comparator<false> comp(*device_input_table,
*device_input_table,
nulls_are_equal);
return thrust::count_if(rmm::exec_policy(stream)->on(stream),
thrust::counting_iterator<cudf::size_type>(0),
thrust::counting_iterator<cudf::size_type>(keys.num_rows()),
[sorted_row_index, comp]
__device__ (cudf::size_type i) {
return (i == 0 || not comp(sorted_row_index[i], sorted_row_index[i-1]));
});
}
}
std::unique_ptr<experimental::table>
drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const& keep,
bool const& nulls_are_equal,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if (0 == input.num_rows() ||
0 == input.num_columns() ||
0 == keys.size()
) {
return experimental::empty_like(input);
}
auto keys_view = input.select(keys);
// The values will be filled into this column
auto unique_indices =
cudf::make_numeric_column(data_type{INT32},
keys_view.num_rows(), mask_state::UNALLOCATED, stream, mr);
auto mutable_unique_indices_view = unique_indices->mutable_view();
// This is just slice of `unique_indices` but with different size as per the
// keys_view has been processed in `get_unique_ordered_indices`
auto unique_indices_view =
detail::get_unique_ordered_indices(keys_view,
mutable_unique_indices_view,
keep, nulls_are_equal,
stream);
// run gather operation to establish new order
return detail::gather(input, unique_indices_view, false, false, false, mr, stream);
}
cudf::size_type unique_count(column_view const& input,
bool const& ignore_nulls,
bool const& nan_as_null,
cudaStream_t stream)
{
if (0 == input.size() || input.null_count() == input.size()) {
return 0;
}
cudf::size_type nrows = input.size();
bool has_nan = false;
// Check for Nans
// Checking for nulls in input and flag nan_as_null, as the count will
// only get affected if these two conditions are true. NAN will only be
  // an extra if nan_as_null was true and input also had null, which
// will increase the count by 1.
if(input.has_nulls() and nan_as_null){
has_nan = cudf::experimental::type_dispatcher(input.type(), has_nans{}, input, stream);
}
auto count = detail::unique_count(table_view{{input}}, true, stream);
// if nan is considered null and there are already null values
if (nan_as_null and has_nan and input.has_nulls())
--count;
if(ignore_nulls and input.has_nulls())
return --count;
else
return count;
}
}// namespace detail
std::unique_ptr<experimental::table>
drop_duplicates(table_view const& input,
std::vector<size_type> const& keys,
duplicate_keep_option const& keep,
bool const& nulls_are_equal,
rmm::mr::device_memory_resource* mr) {
return detail::drop_duplicates(input, keys, keep, nulls_are_equal, mr);
}
cudf::size_type unique_count(column_view const& input,
bool const& ignore_nulls,
bool const& nan_as_null,
rmm::mr::device_memory_resource *mr) {
return detail::unique_count(input, ignore_nulls, nan_as_null);
}
}// namespace experimental
}// namespace cudf
|
245ed7e50c41c05984371a3ede414be69087fddf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
* Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU"
* https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu
* by Xing Cai, Didem Unat and Scott Baden
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <getopt.h>
#include <vector>
#include <algorithm>
#define TILE_DIM 32
using namespace std;
// External functions
extern "C" void splot(double **E, double T, int niter, int m, int n);
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(vector<double> E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++) {
for (i=1; i<=n; i++) {
l2norm += E[j*(n+2) + i]*E[j*(n+2) + i];
if (E[j*(n+2) + i] > mx)
mx = E[j*(n+2) + i];
}
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
// External functions
__global__ void mirror_boundaries(double *E_prev, const int n, const int m);
__global__ void solve_for_pde(double *E, const double *E_prev, const double alpha,
const int n, const int m);
__global__ void solve_for_ode(double *E, double *R, const double alpha,
const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b);
// Main program
int main (int argc, char** argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
// Initialize Host matrices
std::vector<double> h_E((m+2)*(n+2)), h_E_prev((m+2)*(n+2)), h_R((m+2)*(n+2)), h_tmp((m+2)*(n+2));
int i,j;
// Initialization
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
h_E_prev[j*(m+2) + i] = h_R[j*(m+2) + i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
h_E_prev[j*(m+2) + i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
h_R[j*(m+2) + i] = 1.0;
// Initialize device matrices
double *d_E = 0, *d_E_prev = 0, *d_R = 0, *d_tmp = 0;
hipMalloc((void**)&d_E, sizeof(double) * (m+2) * (n+2));
hipMalloc((void**)&d_E_prev, sizeof(double) * (m+2) * (n+2));
hipMalloc((void**)&d_R, sizeof(double) * (m+2) * (n+2));
hipMemcpy(d_E, &h_E[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice);
hipMemcpy(d_E_prev, &h_E_prev[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice);
hipMemcpy(d_R, &h_R[0], sizeof(double) * (m+2) * (n+2), hipMemcpyHostToDevice);
const dim3 thread_size(TILE_DIM,TILE_DIM); // Max thread on one unit
const dim3 num_blocks(m/TILE_DIM+1,n/TILE_DIM+1); // Division will take floor. So we add one. We check the boundaries inside kernels.
double dx = 1.0/n;
// For time integration, these values shouldn't change
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
double alpha = d*dt/(dx*dx);
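  // Stability note: dt is the stricter of the diffusion (PDE) limit dte and the
  // reaction (ODE) limit dtr computed above, scaled by a 0.95 safety factor,
  // i.e. dt = 0.95 * min(dte, dtr).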
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
// Start the timer
double t0 = getTime();
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Integer timestep number
int niter=0;
while (t<T) {
t += dt;
niter++;
hipLaunchKernelGGL(( mirror_boundaries), dim3(num_blocks),dim3(thread_size), 0, 0, d_E_prev, n, m);
hipLaunchKernelGGL(( solve_for_pde), dim3(num_blocks),dim3(thread_size), 0, 0, d_E, d_E_prev, alpha, n, m);
hipLaunchKernelGGL(( solve_for_ode), dim3(num_blocks),dim3(thread_size), 0, 0, d_E, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
d_tmp = d_E; d_E = d_E_prev; d_E_prev = d_tmp;
// if (plot_freq){
// int k = (int)(t/plot_freq);
// if ((t - k * plot_freq) < dt){
// splot(E,t,niter,m+2,n+2);
// }
// }
}//end of while loop
double time_elapsed = getTime() - t0;
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
hipMemcpy(&h_E_prev[0], d_E_prev, sizeof(double) * (m+2) * (n+2), hipMemcpyDeviceToHost);
double mx;
double l2norm = stats(h_E_prev,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
hipFree (d_E);
hipFree (d_E_prev);
hipFree (d_R);
return 0;
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int& num_threads){
/// Command line arguments
// Default value of the domain sizes
static struct option long_options[] = {
{"n", required_argument, 0, 'n'},
{"px", required_argument, 0, 'x'},
{"py", required_argument, 0, 'y'},
{"tfinal", required_argument, 0, 't'},
{"plot", required_argument, 0, 'p'},
{"nocomm", no_argument, 0, 'k'},
{"numthreads", required_argument, 0, 'o'},
};
// Process command line arguments
int ac;
for(ac=1;ac<argc;ac++) {
int c;
while ((c=getopt_long(argc,argv,"n:x:y:t:kp:o:",long_options,NULL)) != -1){
switch (c) {
// Size of the computational box
case 'n':
n = atoi(optarg);
break;
      // X processor geometry
      case 'x':
        px = atoi(optarg);
        break;
      // Y processor geometry
      case 'y':
        py = atoi(optarg);
        break;
// Length of simulation, in simulated time units
case 't':
T = atof(optarg);
break;
// Turn off communication
case 'k':
no_comm = 1;
break;
// Plot the excitation variable
case 'p':
plot_freq = atoi(optarg);
break;
// Plot the excitation variable
case 'o':
num_threads = atoi(optarg);
break;
// Error
default:
printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n");
exit(-1);
}
}
}
}
/* **********************************************************
* Author : Urvashi R.V. [04/06/2004]
* Modified by Didem Unat [03/23/18]
*************************************************************/
#include <stdio.h>
/* Function to plot the 2D array
* 'gnuplot' is instantiated via a pipe and
* the values to be plotted are passed through, along
* with gnuplot commands */
FILE *gnu=NULL;
void splot(double **U, double T, int niter, int m, int n)
{
int i, j;
if(gnu==NULL) gnu = popen("gnuplot","w");
double mx = -1, mn = 32768;
for (j=0; j<m; j++)
for (i=0; i<n; i++){
if (U[j][i] > mx)
mx = U[j][i];
if (U[j][i] < mn)
mn = U[j][i];
}
fprintf(gnu,"set title \"T = %f [niter = %d]\"\n",T, niter);
fprintf(gnu,"set size square\n");
fprintf(gnu,"set key off\n");
fprintf(gnu,"set pm3d map\n");
// Various color schemes
fprintf(gnu,"set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n");
// fprintf(gnu,"set palette rgbformulae 22, 13, 31\n");
// fprintf(gnu,"set palette rgbformulae 30, 31, 32\n");
fprintf(gnu,"splot [0:%d] [0:%d][%f:%f] \"-\"\n",m-1,n-1,mn,mx);
for (j=0; j<m; j++){
for (i=0; i<n; i++) {
fprintf(gnu,"%d %d %f\n", i, j, U[i][j]);
}
fprintf(gnu,"\n");
}
fprintf(gnu,"e\n");
fflush(gnu);
return;
}
| 245ed7e50c41c05984371a3ede414be69087fddf.cu | /*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
* Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU"
* https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu
* by Xing Cai, Didem Unat and Scott Baden
*
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <getopt.h>
#include <vector>
#include <algorithm>
#define TILE_DIM 32
using namespace std;
// External functions
extern "C" void splot(double **E, double T, int niter, int m, int n);
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(vector<double> E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++) {
for (i=1; i<=n; i++) {
l2norm += E[j*(n+2) + i]*E[j*(n+2) + i];
if (E[j*(n+2) + i] > mx)
mx = E[j*(n+2) + i];
}
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
// External functions
__global__ void mirror_boundaries(double *E_prev, const int n, const int m);
__global__ void solve_for_pde(double *E, const double *E_prev, const double alpha,
const int n, const int m);
__global__ void solve_for_ode(double *E, double *R, const double alpha,
const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b);
// Main program
int main (int argc, char** argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
// Initialize Host matrices
std::vector<double> h_E((m+2)*(n+2)), h_E_prev((m+2)*(n+2)), h_R((m+2)*(n+2)), h_tmp((m+2)*(n+2));
int i,j;
// Initialization
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
h_E_prev[j*(m+2) + i] = h_R[j*(m+2) + i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
h_E_prev[j*(m+2) + i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
h_R[j*(m+2) + i] = 1.0;
// Initialize device matrices
double *d_E = 0, *d_E_prev = 0, *d_R = 0, *d_tmp = 0;
cudaMalloc((void**)&d_E, sizeof(double) * (m+2) * (n+2));
cudaMalloc((void**)&d_E_prev, sizeof(double) * (m+2) * (n+2));
cudaMalloc((void**)&d_R, sizeof(double) * (m+2) * (n+2));
cudaMalloc((void**)&d_tmp, sizeof(double) * (m+2) * (n+2));
cudaMemcpy(d_E, &h_E[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice);
cudaMemcpy(d_E_prev, &h_E_prev[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice);
cudaMemcpy(d_R, &h_R[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice);
cudaMemcpy(d_tmp, &h_tmp[0], sizeof(double) * (m+2) * (n+2), cudaMemcpyHostToDevice);
const dim3 thread_size(TILE_DIM,TILE_DIM); // Max thread on one unit
const dim3 num_blocks(m/TILE_DIM+1,n/TILE_DIM+1); // Division will take floor. So we add one. We check the boundaries inside kernels.
double dx = 1.0/n;
// For time integration, these values shouldn't change
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
double alpha = d*dt/(dx*dx);
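  // Worked example (illustrative, not executed): with the default n = 200,
  // dx = 1/200 = 5e-3, rp = 8*(1.1)^2/4 = 2.42,
  // dte = dx^2/(4d + dx^2*(rp+kk)) = 2.5e-5/4.605e-4 ~= 0.054,
  // dtr = 1/(0.01 + (0.07/0.3)*2.42) ~= 1.74,
  // so dt = 0.95*dte ~= 0.052 and alpha = d*dt/dx^2 ~= 0.10.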
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
// Start the timer
double t0 = getTime();
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Integer timestep number
int niter=0;
while (t<T) {
t += dt;
niter++;
mirror_boundaries<<<num_blocks,thread_size>>>(d_E_prev, n, m);
solve_for_pde<<<num_blocks,thread_size>>>(d_E, d_E_prev, alpha, n, m);
solve_for_ode<<<num_blocks,thread_size>>>(d_E, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
d_tmp = d_E; d_E = d_E_prev; d_E_prev = d_tmp;
// if (plot_freq){
// int k = (int)(t/plot_freq);
// if ((t - k * plot_freq) < dt){
// splot(E,t,niter,m+2,n+2);
// }
// }
}//end of while loop
double time_elapsed = getTime() - t0;
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
cudaMemcpy(&h_E_prev[0], d_E_prev, sizeof(double) * (m+2) * (n+2), cudaMemcpyDeviceToHost);
double mx;
double l2norm = stats(h_E_prev,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
cudaFree (d_E);
cudaFree (d_E_prev);
cudaFree (d_R);
cudaFree (d_tmp);
return 0;
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int& num_threads){
/// Command line arguments
// Default value of the domain sizes
static struct option long_options[] = {
{"n", required_argument, 0, 'n'},
{"px", required_argument, 0, 'x'},
{"py", required_argument, 0, 'y'},
{"tfinal", required_argument, 0, 't'},
{"plot", required_argument, 0, 'p'},
{"nocomm", no_argument, 0, 'k'},
{"numthreads", required_argument, 0, 'o'},
  {0, 0, 0, 0}   // getopt_long() requires a zero-filled terminating entry
 };
// Process command line arguments
int ac;
for(ac=1;ac<argc;ac++) {
int c;
while ((c=getopt_long(argc,argv,"n:x:y:t:kp:o:",long_options,NULL)) != -1){
switch (c) {
// Size of the computational box
case 'n':
n = atoi(optarg);
break;
// X processor geometry
      case 'x':
        px = atoi(optarg);
        break;
      // Y processor geometry
      case 'y':
        py = atoi(optarg);
        break;
// Length of simulation, in simulated time units
case 't':
T = atof(optarg);
break;
// Turn off communication
case 'k':
no_comm = 1;
break;
// Plot the excitation variable
case 'p':
plot_freq = atoi(optarg);
break;
      // Number of OpenMP threads
case 'o':
num_threads = atoi(optarg);
break;
// Error
default:
printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n");
exit(-1);
}
}
}
}
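/*
 * Example invocations (illustrative only -- the binary name "apf" is an
 * assumption, use whatever the build actually produces):
 *   ./apf -n 1024 -t 500          run a 1024x1024 box until t = 500
 *   ./apf -n 400 -t 1000 -o 8     same problem, requesting 8 OpenMP threads
 * The flags correspond to the getopt string "n:x:y:t:kp:o:" parsed above.
 */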
/* **********************************************************
* Author : Urvashi R.V. [04/06/2004]
* Modified by Didem Unat [03/23/18]
*************************************************************/
#include <stdio.h>
/* Function to plot the 2D array
* 'gnuplot' is instantiated via a pipe and
* the values to be plotted are passed through, along
* with gnuplot commands */
FILE *gnu=NULL;
void splot(double **U, double T, int niter, int m, int n)
{
int i, j;
if(gnu==NULL) gnu = popen("gnuplot","w");
double mx = -1, mn = 32768;
for (j=0; j<m; j++)
for (i=0; i<n; i++){
if (U[j][i] > mx)
mx = U[j][i];
if (U[j][i] < mn)
mn = U[j][i];
}
fprintf(gnu,"set title \"T = %f [niter = %d]\"\n",T, niter);
fprintf(gnu,"set size square\n");
fprintf(gnu,"set key off\n");
fprintf(gnu,"set pm3d map\n");
// Various color schemes
fprintf(gnu,"set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n");
// fprintf(gnu,"set palette rgbformulae 22, 13, 31\n");
// fprintf(gnu,"set palette rgbformulae 30, 31, 32\n");
fprintf(gnu,"splot [0:%d] [0:%d][%f:%f] \"-\"\n",m-1,n-1,mn,mx);
for (j=0; j<m; j++){
for (i=0; i<n; i++) {
fprintf(gnu,"%d %d %f\n", i, j, U[i][j]);
}
fprintf(gnu,"\n");
}
fprintf(gnu,"e\n");
fflush(gnu);
return;
}
|
56ad0685323b79c6f616336f92abf22b103559ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CSci 360 Computer Architecture 3
Hunter College of the City University of New York
Prof. Stewart Weiss
CUDA-based Parallel Radix Sort
For complete details and an article about other approaches, see
http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
This algorithm is due to
Mark Harris
NVIDIA Corporation
Shubhabrata Sengupta
University of California, Davis
John D. Owens
University of California, Davis
I have rewritten parts of it to make it clearer, and provided a proof
of correctness for the partition step.
*/
// Forward declaration of partition_by_bit(), called by radix_sort()
__device__ void partition_by_bit(unsigned int *values, unsigned int bit);
/*******************************************************************************
RADIX_SORT()
For each bit position from the least significant to the most significant,
partition the elements so that all elements with a 0 in that bit position
precede those with a 1 in that position, using a stable sort.
When all bits have been so processed, the array is sorted.
Reminder -- a sort is stable if the sort preserves the relative order of
equal elements.
Because this is a device function (executed by each thread concurrently),
after each partitioning step, the threads must execute __syncthreads() so
that the array is guaranteed to be ready for the next step.
*******************************************************************************/
__device__ void radix_sort(unsigned int *values)
{
int bit;
for( bit = 0; bit < 32; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
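/*
 * Usage sketch (added for illustration; not part of the original code).
 * radix_sort() is a __device__ function, so it has to be driven from a
 * kernel. The minimal pattern below assumes one block sorts exactly
 * blockDim.x elements, one element per thread; the names
 * example_sort_kernel/example_sort_host are hypothetical.
 */
__global__ void example_sort_kernel(unsigned int *values)
{
    radix_sort(values);
}
void example_sort_host(unsigned int *h_values, unsigned int n)
{
    unsigned int *d_values;
    hipMalloc((void **)&d_values, n * sizeof(unsigned int));
    hipMemcpy(d_values, h_values, n * sizeof(unsigned int), hipMemcpyHostToDevice);
    // n must not exceed the maximum block size of the device.
    hipLaunchKernelGGL(example_sort_kernel, dim3(1), dim3(n), 0, 0, d_values);
    hipMemcpy(h_values, d_values, n * sizeof(unsigned int), hipMemcpyDeviceToHost);
    hipFree(d_values);
}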
/*******************************************************************************
PLUS_SCAN()
plus_scan(a[]), where a[] is an array of integers, replaces a[] by the prefix
sums of the elements of a. The prefix sum of an element in an array (or more
generally, any sequence) is the sum of all elements up to and including that
element. The sum operation can be replaced by any binary associative operator,
such as multiplication.
A thread with ID i that calls plus_scan(a) gets as its return value the new
element in a[i]. All threads together collectively replace the elements of
a[].
Example:
A = 3 1 7 0 4 1 6 3
Successive iterations yield
offset = 1 A = 3 4 8 7 4 5 7 9
offset = 2 A = 3 4 11 11 12 12 11 14
offset = 4 A = 3 4 11 11 15 16 22 25
When it is finished it will have taken log N steps and used N log N adds.
(This means that it is not work-efficient, since the sequential algorithm
uses N adds.)
*******************************************************************************/
template<class T>
__device__ T plus_scan(T *x)
{
unsigned int i = threadIdx.x; // id of thread executing this instance
unsigned int n = blockDim.x; // total number of threads in this block
unsigned int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
T t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
            x[i] = t + x[i]; // i.e., x[i] = x[i] + x[i-offset]
__syncthreads();
}
return x[i];
}
/*******************************************************************************
partition_by_bit()
This function is executed by every thread. Given an array of non-negative
integer values, and a bit position, b, this partitions the array such that
for all values[i], i = 0,...,n-1, the value of bit b in each element
values[k] for k < i is <= the value of bit b in values[i], and if bit b in
values[j] == bit b in values[i], and j < i, then after the partition, the
two elements will be in the same relative order (i.e., it is a stable sort).
Each thread is responsible for repositioning a single element of the array.
*******************************************************************************/
__device__ void partition_by_bit(unsigned int *values, unsigned int bit)
{
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[i]; // value of integer at position i
unsigned int p_i = (x_i >> bit) & 1; // value of bit at position bit
// Replace values array so that values[i] is the value of bit bit in
// element i.
values[i] = p_i;
// Wait for all threads to finish this.
__syncthreads();
// Now the values array consists of 0's and 1's, such that values[i] = 0
// if the bit at position bit in element i was 0 and 1 otherwise.
// Compute number of True bits (1-bits) up to and including values[i],
// transforming values[] so that values[i] contains the sum of the 1-bits
// from values[0] .. values[i]
unsigned int T_before = plus_scan(values);
/*
plus_scan(values) returns the total number of 1-bits for all j such that
j <= i. This is assigned to T_before, the number of 1-bits before i
(includes i itself)
*/
// The plus_scan() function does not return here until all threads have
// reached the __syncthreads() call in the last iteration of its loop
// Therefore, when it does return, we know that the entire array has had
// the prefix sums computed, and that values[size-1] is the sum of all
// elements in the array, which happens to be the number of 1-bits in
// the current bit position.
unsigned int T_total = values[size-1];
// T_total, after the scan, is the total number of 1-bits in the entire array.
unsigned int F_total = size - T_total;
/*
F_total is the total size of the array less the number of 1-bits and hence
is the number of 0-bits.
*/
__syncthreads();
/*
The value x_i must now be put back into the values array in the correct
position. The array has to satisfy the condition that all values with a 0 in
the current bit position must precede all those with a 1 in that position
and it must be stable, meaning that if x_j and x_k both had the same bit
value before, and j < k, then x_j must precede x_k after sorting.
Therefore, if x_i had a 1 in the current bit position before, it must now
be in the position such that all x_j that had a 0 precede it, and all x_j
that had a 1 in that bit and for which j < i, must precede it. Therefore
if x_i had a 1, it must go into the index T_before-1 + F_total, which is the
sum of the 0-bits and 1-bits that preceded it before (subtracting 1 since
T_before includes x_i itself).
If x_i has a 0 in the current bit position, then it has to be "slid" down
in the array before all x_j such that x_j has a 1 in the current bit, but
no farther than that. Since there are T_before such j, it has to go to
position i - T_before. (There are T_before such j because x_i had a zero,
so in the prefix sum, it does not contribute to the sum.)
*/
if ( p_i )
values[T_before-1 + F_total] = x_i;
else
values[i - T_before] = x_i;
/*
The interesting thing is that no two values will be placed in the same
position. I.e., this is a permutation of the array.
Proof: Suppose that x_i and x_j both end up in index k. There are three
cases:
Case 1. x_i and x_j have a 1 in the current bit position
Since F_total is the same for all threads, this implies that T_before must
be the same for threads i and j. But this is not possible because one must
precede the other and therefore the one that precedes it must have smaller
T_before.
Case 2. x_i and x_j both have a 0 in the current bit position.
Since they both are in k, we have
         k = i - T_bef_i = j - T_bef_j   or
         i - j = T_bef_i - T_bef_j
    Assume i > j without loss of generality. This implies that the number of
    1-bits from position j+1 to position i-1 (since both x_j and x_i have
    0-bits) is i-j. But that is impossible since there are only i-j-1 positions
    from j+1 to i-1.
Case 3. x_i and x_j have different bit values.
Assume without loss of generality that x_j has the 0-bit and x_i, the 1-bit.
T_before_j is the number of 1 bits in positions strictly less than j,
because there is a 0 in position j. The total number of positions less than
j is j, since the array is 0-based. Therefore:
j-T_before_j is the number of 0-bits in positions strictly less than j.
This must be strictly less than F_total, since x_j has a 0 in position j,
so there is at least one more 0 besides those below position j. Hence:
(1) F_total > j - T_before_j
Turning to i, T_before_i is at least 1, since x_i has a 1 in its bit. So,
T_before_i - 1 is at least 0, and
(2) T_before_i - 1 + F_total >= F_total.
Therefore, combining (1) and (2)
(3) T_before_i - 1 + F_total >= F_total
> j - T_before_j
But if x_i and x_j map to the same position, then
(4) j - T_before_j = T_before_i - 1 + F_total
> j - T_before_j
which is a contradiction since a number cannot be greater than itself!
Therefore it is impossible for x_i and x_j to be placed in the same index
if i != j.
*/
}
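/*
 * Worked example of one partition step (added for illustration).
 * Take values = {5, 2, 7, 4} and bit = 0:
 *   p_i             = {1, 0, 1, 0}
 *   after plus_scan = {1, 1, 2, 2}   (T_before per thread)
 *   T_total = 2, F_total = 2
 *   thread 0 (x=5, p=1): dest = T_before-1 + F_total = 0 + 2 = 2
 *   thread 1 (x=2, p=0): dest = i - T_before         = 1 - 1 = 0
 *   thread 2 (x=7, p=1): dest = T_before-1 + F_total = 1 + 2 = 3
 *   thread 3 (x=4, p=0): dest = i - T_before         = 3 - 2 = 1
 * Result: {2, 4, 5, 7} -- all 0-bit elements precede the 1-bit ones, and
 * the relative order within each group is preserved (stable).
 */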
| 56ad0685323b79c6f616336f92abf22b103559ad.cu | /*
CSci 360 Computer Architecture 3
Hunter College of the City University of New York
Prof. Stewart Weiss
CUDA-based Parallel Radix Sort
For complete details and an article about other approaches, see
http://http.developer.nvidia.com/GPUGems3/gpugems3_ch39.html
This algorithm is due to
Mark Harris
NVIDIA Corporation
Shubhabrata Sengupta
University of California, Davis
John D. Owens
University of California, Davis
I have rewritten parts of it to make it clearer, and provided a proof
of correctness for the partition step.
*/
// Forward declaration of partition_by_bit(), called by radix_sort()
__device__ void partition_by_bit(unsigned int *values, unsigned int bit);
/*******************************************************************************
RADIX_SORT()
For each bit position from the least significant to the most significant,
partition the elements so that all elements with a 0 in that bit position
precede those with a 1 in that position, using a stable sort.
When all bits have been so processed, the array is sorted.
Reminder -- a sort is stable if the sort preserves the relative order of
equal elements.
Because this is a device function (executed by each thread concurrently),
after each partitioning step, the threads must execute __syncthreads() so
that the array is guaranteed to be ready for the next step.
*******************************************************************************/
__device__ void radix_sort(unsigned int *values)
{
int bit;
for( bit = 0; bit < 32; ++bit )
{
partition_by_bit(values, bit);
__syncthreads();
}
}
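/*
 * Usage sketch (added for illustration; not part of the original code).
 * radix_sort() is a __device__ function, so it has to be driven from a
 * kernel. The minimal pattern below assumes one block sorts exactly
 * blockDim.x elements, one element per thread; the names
 * example_sort_kernel/example_sort_host are hypothetical.
 */
__global__ void example_sort_kernel(unsigned int *values)
{
    radix_sort(values);
}
void example_sort_host(unsigned int *h_values, unsigned int n)
{
    unsigned int *d_values;
    cudaMalloc((void **)&d_values, n * sizeof(unsigned int));
    cudaMemcpy(d_values, h_values, n * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // n must not exceed the maximum block size of the device.
    example_sort_kernel<<<1, n>>>(d_values);
    cudaMemcpy(h_values, d_values, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(d_values);
}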
/*******************************************************************************
PLUS_SCAN()
plus_scan(a[]), where a[] is an array of integers, replaces a[] by the prefix
sums of the elements of a. The prefix sum of an element in an array (or more
generally, any sequence) is the sum of all elements up to and including that
element. The sum operation can be replaced by any binary associative operator,
such as multiplication.
A thread with ID i that calls plus_scan(a) gets as its return value the new
element in a[i]. All threads together collectively replace the elements of
a[].
Example:
A = 3 1 7 0 4 1 6 3
Successive iterations yield
offset = 1 A = 3 4 8 7 4 5 7 9
offset = 2 A = 3 4 11 11 12 12 11 14
offset = 4 A = 3 4 11 11 15 16 22 25
When it is finished it will have taken log N steps and used N log N adds.
(This means that it is not work-efficient, since the sequential algorithm
uses N adds.)
*******************************************************************************/
template<class T>
__device__ T plus_scan(T *x)
{
unsigned int i = threadIdx.x; // id of thread executing this instance
unsigned int n = blockDim.x; // total number of threads in this block
unsigned int offset; // distance between elements to be added
for( offset = 1; offset < n; offset *= 2) {
T t;
if ( i >= offset )
t = x[i-offset];
__syncthreads();
if ( i >= offset )
            x[i] = t + x[i]; // i.e., x[i] = x[i] + x[i-offset]
__syncthreads();
}
return x[i];
}
/*******************************************************************************
partition_by_bit()
This function is executed by every thread. Given an array of non-negative
integer values, and a bit position, b, this partitions the array such that
for all values[i], i = 0,...,n-1, the value of bit b in each element
values[k] for k < i is <= the value of bit b in values[i], and if bit b in
values[j] == bit b in values[i], and j < i, then after the partition, the
two elements will be in the same relative order (i.e., it is a stable sort).
Each thread is responsible for repositioning a single element of the array.
*******************************************************************************/
__device__ void partition_by_bit(unsigned int *values, unsigned int bit)
{
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[i]; // value of integer at position i
unsigned int p_i = (x_i >> bit) & 1; // value of bit at position bit
// Replace values array so that values[i] is the value of bit bit in
// element i.
values[i] = p_i;
// Wait for all threads to finish this.
__syncthreads();
// Now the values array consists of 0's and 1's, such that values[i] = 0
// if the bit at position bit in element i was 0 and 1 otherwise.
// Compute number of True bits (1-bits) up to and including values[i],
// transforming values[] so that values[i] contains the sum of the 1-bits
// from values[0] .. values[i]
unsigned int T_before = plus_scan(values);
/*
plus_scan(values) returns the total number of 1-bits for all j such that
j <= i. This is assigned to T_before, the number of 1-bits before i
(includes i itself)
*/
// The plus_scan() function does not return here until all threads have
// reached the __syncthreads() call in the last iteration of its loop
// Therefore, when it does return, we know that the entire array has had
// the prefix sums computed, and that values[size-1] is the sum of all
// elements in the array, which happens to be the number of 1-bits in
// the current bit position.
unsigned int T_total = values[size-1];
// T_total, after the scan, is the total number of 1-bits in the entire array.
unsigned int F_total = size - T_total;
/*
F_total is the total size of the array less the number of 1-bits and hence
is the number of 0-bits.
*/
__syncthreads();
/*
The value x_i must now be put back into the values array in the correct
position. The array has to satisfy the condition that all values with a 0 in
the current bit position must precede all those with a 1 in that position
and it must be stable, meaning that if x_j and x_k both had the same bit
value before, and j < k, then x_j must precede x_k after sorting.
Therefore, if x_i had a 1 in the current bit position before, it must now
be in the position such that all x_j that had a 0 precede it, and all x_j
that had a 1 in that bit and for which j < i, must precede it. Therefore
if x_i had a 1, it must go into the index T_before-1 + F_total, which is the
sum of the 0-bits and 1-bits that preceded it before (subtracting 1 since
T_before includes x_i itself).
If x_i has a 0 in the current bit position, then it has to be "slid" down
in the array before all x_j such that x_j has a 1 in the current bit, but
no farther than that. Since there are T_before such j, it has to go to
position i - T_before. (There are T_before such j because x_i had a zero,
so in the prefix sum, it does not contribute to the sum.)
*/
if ( p_i )
values[T_before-1 + F_total] = x_i;
else
values[i - T_before] = x_i;
/*
The interesting thing is that no two values will be placed in the same
position. I.e., this is a permutation of the array.
Proof: Suppose that x_i and x_j both end up in index k. There are three
cases:
Case 1. x_i and x_j have a 1 in the current bit position
Since F_total is the same for all threads, this implies that T_before must
be the same for threads i and j. But this is not possible because one must
precede the other and therefore the one that precedes it must have smaller
T_before.
Case 2. x_i and x_j both have a 0 in the current bit position.
Since they both are in k, we have
         k = i - T_bef_i = j - T_bef_j   or
         i - j = T_bef_i - T_bef_j
    Assume i > j without loss of generality. This implies that the number of
    1-bits from position j+1 to position i-1 (since both x_j and x_i have
    0-bits) is i-j. But that is impossible since there are only i-j-1 positions
    from j+1 to i-1.
Case 3. x_i and x_j have different bit values.
Assume without loss of generality that x_j has the 0-bit and x_i, the 1-bit.
T_before_j is the number of 1 bits in positions strictly less than j,
because there is a 0 in position j. The total number of positions less than
j is j, since the array is 0-based. Therefore:
j-T_before_j is the number of 0-bits in positions strictly less than j.
This must be strictly less than F_total, since x_j has a 0 in position j,
so there is at least one more 0 besides those below position j. Hence:
(1) F_total > j - T_before_j
Turning to i, T_before_i is at least 1, since x_i has a 1 in its bit. So,
T_before_i - 1 is at least 0, and
(2) T_before_i - 1 + F_total >= F_total.
Therefore, combining (1) and (2)
(3) T_before_i - 1 + F_total >= F_total
> j - T_before_j
But if x_i and x_j map to the same position, then
(4) j - T_before_j = T_before_i - 1 + F_total
> j - T_before_j
which is a contradiction since a number cannot be greater than itself!
Therefore it is impossible for x_i and x_j to be placed in the same index
if i != j.
*/
}
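/*
 * Worked example of one partition step (added for illustration).
 * Take values = {5, 2, 7, 4} and bit = 0:
 *   p_i             = {1, 0, 1, 0}
 *   after plus_scan = {1, 1, 2, 2}   (T_before per thread)
 *   T_total = 2, F_total = 2
 *   thread 0 (x=5, p=1): dest = T_before-1 + F_total = 0 + 2 = 2
 *   thread 1 (x=2, p=0): dest = i - T_before         = 1 - 1 = 0
 *   thread 2 (x=7, p=1): dest = T_before-1 + F_total = 1 + 2 = 3
 *   thread 3 (x=4, p=0): dest = i - T_before         = 3 - 2 = 1
 * Result: {2, 4, 5, 7} -- all 0-bit elements precede the 1-bit ones, and
 * the relative order within each group is preserved (stable).
 */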
|
7579f19ad18624ab43dc745788aeb60e5a09bd03.hip | // !!! This is a file automatically generated by hipify!!!
/*
* lcudamorph.c
*
* Created on: Feb 6, 2010
* Author: henmak
* Modified by: vicdan-8 November 2011
*/
#include <hip/hip_runtime.h>
#include <memory>
#include <iostream>
#include <cassert>
#include <stdio.h>
#include <string.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "morphology.cuh"
#include "sharedmem.cuh"
#define PRINT_ON
#ifndef PRINTF
# ifndef PRINT_ON
# define PRINTF(...) ((void)0)
# else
# define PRINTF(fmt,...) (printf(fmt, ## __VA_ARGS__))
# endif
#endif
// 1.0f uint = 1065353216
#define FLOAT_MAX 1
enum vhgwDirection {
HORIZONTAL,
VERTICAL
};
enum morphOperation {
ERODE,
DILATE
};
template<class dataType, morphOperation MOP>
__device__ inline dataType minMax(const dataType a, const dataType b) {
return (MOP == ERODE) ? min(a,b) : max(a,b);
}
// Simple macro for minMax
#define MINMAX(op,dst, newVal) dst = minMax<dataType, op>((dataType)dst, (dataType)newVal);
texture<float, 2, hipReadModeElementType> srcTex;
/*
* Generally one does not include .cu files like this, but here it allows the templated functions to be called,
* without having to construct wrappers for all the various combinations of dataType/morphOp.
*
* Note: Drawback of this is that morphology.cu has to be updated for changes in the below includes to be reflected in the object file.
*/
// 3x3
#include "cu/lcuda3x3.cu"
// vHGW (1D SE)
#include "cu/lcudavhgw.cu"
// Generic
#include "cu/lcudaGenericKernel.cu"
template <class dataType, morphOperation MOP>
static void _dispatchMorphOp(const dataType* pSrc, Npp32s nSrcStep, dataType* pDst, Npp32s nDstStep, NppiSize srcROI, const Npp8u * pMask,
const float* maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
int offsetX = (borderSize.width)/4 - (maskSize.width)/2;
int offsetY = (borderSize.height)/4 - (maskSize.height)/2;
    // Steps are specified in bytes. Pointer arithmetic below requires we compensate for size of <dataType>.
nSrcStep /= sizeof(dataType);
nDstStep /= sizeof(dataType);
int srcBorderOffset = (nSrcStep * offsetY + offsetX);
char processed = 0;
if (isFlat) {
// Custom fast 3x3
if (maskSize.height == 3 && maskSize.width == 3) {
_global3x3<dataType, MOP>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, pMask, seBinary);
processed = 1;
}
// Vertical vHGW
else if (maskSize.width == 1) {
if (MOP == ERODE) {
PRINTF("Vertical Erosion: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Erosion: Offset (%d,%d)\n", offsetX, offsetY);
}
else {
PRINTF("Vertical Dilate: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Dilate: Offset (%d,%d)\n", offsetX, offsetY);
}
_globalVHGW<dataType, MOP, VERTICAL>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, maskSize.height, borderSize);
processed = 1;
}
// Horizontal vHGW
else if (maskSize.height == 1) {
if (MOP == ERODE) {
PRINTF("Horizontal Erosion: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Erosion: Offset (%d,%d)\n", offsetX, offsetY);
}
else {
PRINTF("Horizontal Dilate: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Dilate: Offset (%d,%d)\n", offsetX, offsetY);
}
_globalVHGW<dataType, MOP, HORIZONTAL>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, maskSize.width, borderSize);
processed = 1;
}
}
// Non-flat and other arbitrary SE
if (!processed) {
PRINTF("Generic!\n");
_globalGeneric<dataType, MOP>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, pMask, maskSize, maskHeight, borderSize, anchor);
}
    // Block until async kernel calls have been executed.
hipDeviceSynchronize();
}
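/*
 * Dispatch summary (added for illustration): with isFlat set, a 3x3 mask is
 * handled by _global3x3; maskSize.width == 1 selects the vertical vHGW path;
 * maskSize.height == 1 selects the horizontal vHGW path; every other flat
 * mask, and any non-flat mask, falls through to _globalGeneric.
 */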
/*
* Public functions to perform erosion or dilation.
*/
//void performDilation(const lcudaFloat * pSrc, Npp32s nSrcStep, lcudaFloat * pDst, Npp32s nDstStep, NppiSize srcROI,
// const Npp8u * pMask, const float* maskHeight, NppiSize maskSize, NppiPoint anchor,
// NppiSize borderSize, char isFlat, int seBinary) {
// _dispatchMorphOp<lcudaFloat, DILATE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
//}
//void performErosion(const lcudaFloat * pSrc, Npp32s nSrcStep, lcudaFloat * pDst, Npp32s nDstStep, NppiSize srcROI,
// const Npp8u * pMask, const float* maskHeight, NppiSize maskSize, NppiPoint anchor,
// NppiSize borderSize, char isFlat, int seBinary) {
// _dispatchMorphOp<lcudaFloat, ERODE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
//}
EXTERN void performErosion_8u(const Npp8u * pSrc, Npp32s nSrcStep, Npp8u * pDst, Npp32s nDstStep, NppiSize srcROI,
const Npp8u * pMask, const float * maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
_dispatchMorphOp<lcuda8u, ERODE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
}
EXTERN void performDilation_8u(const Npp8u * pSrc, Npp32s nSrcStep, Npp8u * pDst, Npp32s nDstStep, NppiSize srcROI,
const Npp8u * pMask, const float * maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
_dispatchMorphOp<lcuda8u, DILATE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
}
template <class matrixType, class dataType>
static void _lcudaCopyBorder(matrixType src, matrixType dst, int color, int offsetX, int offsetY) {
PRINTF("SRC: %d %d\n", src.width, src.height);
PRINTF("DST: %d %d\n", dst.width, dst.height);
PRINTF("Offsets x %d, y %d\n", offsetX, offsetY);
int realPitch = dst.pitch / sizeof(dataType);
thrust::device_ptr<dataType> dev_ptr(dst.data);
thrust::fill(dev_ptr, dev_ptr + (dst.height-1)*realPitch + dst.width, color);
dataType *data = dst.data + offsetY * realPitch + offsetX;
hipMemcpy2D(data, dst.pitch, src.data, src.pitch, src.width*sizeof(dataType), src.height, hipMemcpyDeviceToDevice);
}
void lcudaCopyBorder(lcudaMatrix src, lcudaMatrix dst, int color, int offsetX, int offsetY) {
_lcudaCopyBorder<lcudaMatrix, lcudaFloat>(src, dst, color, offsetX, offsetY);
}
EXTERN void lcudaCopyBorder_8u(lcudaMatrix_8u src, lcudaMatrix_8u dst, int color, int offsetX, int offsetY) {
_lcudaCopyBorder<lcudaMatrix_8u, lcuda8u>(src, dst, color, offsetX, offsetY);
}
| 7579f19ad18624ab43dc745788aeb60e5a09bd03.cu | /*
* lcudamorph.c
*
* Created on: Feb 6, 2010
* Author: henmak
* Modified by: vicdan-8 November 2011
*/
#include <cuda.h>
#include <memory>
#include <iostream>
#include <cassert>
#include <stdio.h>
#include <string.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include "morphology.cuh"
#include "sharedmem.cuh"
#define PRINT_ON
#ifndef PRINTF
# ifndef PRINT_ON
# define PRINTF(...) ((void)0)
# else
# define PRINTF(fmt,...) (printf(fmt, ## __VA_ARGS__))
# endif
#endif
// 1.0f uint = 1065353216
#define FLOAT_MAX 1
enum vhgwDirection {
HORIZONTAL,
VERTICAL
};
enum morphOperation {
ERODE,
DILATE
};
template<class dataType, morphOperation MOP>
__device__ inline dataType minMax(const dataType a, const dataType b) {
return (MOP == ERODE) ? min(a,b) : max(a,b);
}
// Simple macro for minMax
#define MINMAX(op,dst, newVal) dst = minMax<dataType, op>((dataType)dst, (dataType)newVal);
texture<float, 2, cudaReadModeElementType> srcTex;
/*
* Generally one does not include .cu files like this, but here it allows the templated functions to be called,
* without having to construct wrappers for all the various combinations of dataType/morphOp.
*
* Note: Drawback of this is that morphology.cu has to be updated for changes in the below includes to be reflected in the object file.
*/
// 3x3
#include "cu/lcuda3x3.cu"
// vHGW (1D SE)
#include "cu/lcudavhgw.cu"
// Generic
#include "cu/lcudaGenericKernel.cu"
template <class dataType, morphOperation MOP>
static void _dispatchMorphOp(const dataType* pSrc, Npp32s nSrcStep, dataType* pDst, Npp32s nDstStep, NppiSize srcROI, const Npp8u * pMask,
const float* maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
int offsetX = (borderSize.width)/4 - (maskSize.width)/2;
int offsetY = (borderSize.height)/4 - (maskSize.height)/2;
    // Steps are specified in bytes. Pointer arithmetic below requires we compensate for size of <dataType>.
nSrcStep /= sizeof(dataType);
nDstStep /= sizeof(dataType);
int srcBorderOffset = (nSrcStep * offsetY + offsetX);
char processed = 0;
if (isFlat) {
// Custom fast 3x3
if (maskSize.height == 3 && maskSize.width == 3) {
_global3x3<dataType, MOP>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, pMask, seBinary);
processed = 1;
}
// Vertical vHGW
else if (maskSize.width == 1) {
if (MOP == ERODE) {
PRINTF("Vertical Erosion: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Erosion: Offset (%d,%d)\n", offsetX, offsetY);
}
else {
PRINTF("Vertical Dilate: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Dilate: Offset (%d,%d)\n", offsetX, offsetY);
}
_globalVHGW<dataType, MOP, VERTICAL>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, maskSize.height, borderSize);
processed = 1;
}
// Horizontal vHGW
else if (maskSize.height == 1) {
if (MOP == ERODE) {
PRINTF("Horizontal Erosion: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Erosion: Offset (%d,%d)\n", offsetX, offsetY);
}
else {
PRINTF("Horizontal Dilate: SE Size (%dx%d)\n", maskSize.width, maskSize.height);
PRINTF("Dilate: Offset (%d,%d)\n", offsetX, offsetY);
}
_globalVHGW<dataType, MOP, HORIZONTAL>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, maskSize.width, borderSize);
processed = 1;
}
}
// Non-flat and other arbitrary SE
if (!processed) {
PRINTF("Generic!\n");
_globalGeneric<dataType, MOP>(pSrc + srcBorderOffset, nSrcStep, pDst, nDstStep, srcROI, pMask, maskSize, maskHeight, borderSize, anchor);
}
    // Block until async kernel calls have been executed.
cudaThreadSynchronize();
}
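/*
 * Dispatch summary (added for illustration): with isFlat set, a 3x3 mask is
 * handled by _global3x3; maskSize.width == 1 selects the vertical vHGW path;
 * maskSize.height == 1 selects the horizontal vHGW path; every other flat
 * mask, and any non-flat mask, falls through to _globalGeneric.
 */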
/*
* Public functions to perform erosion or dilation.
*/
//void performDilation(const lcudaFloat * pSrc, Npp32s nSrcStep, lcudaFloat * pDst, Npp32s nDstStep, NppiSize srcROI,
// const Npp8u * pMask, const float* maskHeight, NppiSize maskSize, NppiPoint anchor,
// NppiSize borderSize, char isFlat, int seBinary) {
// _dispatchMorphOp<lcudaFloat, DILATE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
//}
//void performErosion(const lcudaFloat * pSrc, Npp32s nSrcStep, lcudaFloat * pDst, Npp32s nDstStep, NppiSize srcROI,
// const Npp8u * pMask, const float* maskHeight, NppiSize maskSize, NppiPoint anchor,
// NppiSize borderSize, char isFlat, int seBinary) {
// _dispatchMorphOp<lcudaFloat, ERODE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
//}
EXTERN void performErosion_8u(const Npp8u * pSrc, Npp32s nSrcStep, Npp8u * pDst, Npp32s nDstStep, NppiSize srcROI,
const Npp8u * pMask, const float * maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
_dispatchMorphOp<lcuda8u, ERODE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
}
EXTERN void performDilation_8u(const Npp8u * pSrc, Npp32s nSrcStep, Npp8u * pDst, Npp32s nDstStep, NppiSize srcROI,
const Npp8u * pMask, const float * maskHeight, NppiSize maskSize, NppiPoint anchor, NppiSize borderSize, char isFlat, int seBinary) {
_dispatchMorphOp<lcuda8u, DILATE>(pSrc, nSrcStep, pDst, nDstStep, srcROI, pMask, maskHeight, maskSize, anchor, borderSize, isFlat, seBinary);
}
template <class matrixType, class dataType>
static void _lcudaCopyBorder(matrixType src, matrixType dst, int color, int offsetX, int offsetY) {
PRINTF("SRC: %d %d\n", src.width, src.height);
PRINTF("DST: %d %d\n", dst.width, dst.height);
PRINTF("Offsets x %d, y %d\n", offsetX, offsetY);
int realPitch = dst.pitch / sizeof(dataType);
thrust::device_ptr<dataType> dev_ptr(dst.data);
thrust::fill(dev_ptr, dev_ptr + (dst.height-1)*realPitch + dst.width, color);
dataType *data = dst.data + offsetY * realPitch + offsetX;
cudaMemcpy2D(data, dst.pitch, src.data, src.pitch, src.width*sizeof(dataType), src.height, cudaMemcpyDeviceToDevice);
}
void lcudaCopyBorder(lcudaMatrix src, lcudaMatrix dst, int color, int offsetX, int offsetY) {
_lcudaCopyBorder<lcudaMatrix, lcudaFloat>(src, dst, color, offsetX, offsetY);
}
EXTERN void lcudaCopyBorder_8u(lcudaMatrix_8u src, lcudaMatrix_8u dst, int color, int offsetX, int offsetY) {
_lcudaCopyBorder<lcudaMatrix_8u, lcuda8u>(src, dst, color, offsetX, offsetY);
}
|
4272bf1982b35885d0d5bffb6699e175ac56452c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <sys/time.h>
#include "radixsort.h"
#include "random.hpp"
#include "timer.h"
#include "bsearch_cuda.h"
int main(int argc, char *argv[]) {
if (argc < 4) {
fprintf(stderr, "usage: %s <D size> <Q size> <I/T Size>"
"<seed> <device>\n",
argv[0]);
return 1;
}
CUDA_SAFE_CALL( hipSetDevice( atoi(argv[5] ) ) );
//CUDA_SAFE_CALL( hipFree(NULL) );
int D_size = atoi(argv[1]);
int Q_size = atoi(argv[2]);
int I_size = atoi(argv[3]);
int T_size = I_size;
int seed = atoi(argv[4]);
hipError_t err;
//{{{ gen Q and D
RNG_rand48 D_r(seed);
D_r.generate(D_size);
unsigned int *D_d = (unsigned int *)D_r.get_random_numbers();
RNG_rand48 Q_r(seed);
Q_r.generate(Q_size);
unsigned int *Q_d = (unsigned int *)Q_r.get_random_numbers();
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "rand errors: %s.\n", hipGetErrorString( err) );
//}}}
//{{{ sort D
start();
nvRadixSort::RadixSort sort_D_d(D_size, true);
sort_D_d.sort((unsigned int*)D_d, 0, D_size, 32);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "sort d: %s.\n", hipGetErrorString( err) );
stop();
unsigned long sort_d_time = report();
//}}}
unsigned int *D_h = (unsigned int *)malloc(
D_size * sizeof(unsigned int));
hipMemcpy(D_h, D_d, (D_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
unsigned int *Q_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
hipMemcpy(Q_h, Q_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
int block_size = 256;
dim3 dimBlock(block_size);
int grid_size = ( Q_size + block_size - 1) / (block_size * 1);
dim3 dimGrid( grid_size );
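 // Ceiling division (illustrative numbers): Q_size = 1000, block_size = 256
 // gives grid_size = (1000 + 255) / 256 = 4 blocks, enough to cover all queries.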
//{{{b_search
unsigned int *R_d;
hipMalloc((void **)&R_d, (Q_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( b_search) , dim3(dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, Q_d, Q_size, R_d);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "b_search: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_noindex_1_time = report();
start();
hipLaunchKernelGGL(( b_search) , dim3(dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, Q_d, Q_size, R_d);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "b_search: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_noindex_2_time = report();
unsigned int *BR_h = (unsigned int *)malloc( Q_size * sizeof(unsigned int));
hipMemcpy(BR_h, R_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
hipFree(R_d);
//}}}
//{{{ index
int index_grid_size = ( I_size + block_size - 1) / (block_size * 1);
dim3 index_dimGrid( index_grid_size );
unsigned int *I_d;
hipMalloc((void **)&I_d, (I_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( gen_index) , dim3(index_dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, I_d, I_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "index: %s.\n", hipGetErrorString( err) );
stop();
unsigned long index_time = report();
unsigned int *I_h = (unsigned int *)malloc(
I_size * sizeof(unsigned int));
hipMemcpy(I_h, I_d, (I_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
//}}}
//{{{ i_gm_binary_search
unsigned int *IGR_d;
hipMalloc((void **)&IGR_d, (Q_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( i_gm_binary_search), dim3(dimGrid), dim3(dimBlock), 0, 0,
D_d, D_size, Q_d, Q_size, IGR_d, I_d, I_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "binary_search_gp 1: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_gmindex_1_time = report();
unsigned int *IGR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
hipMemcpy(IGR_h, IGR_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
hipFree(IGR_d);
//}}}
//{{{ i_sm_binary_search
unsigned int *ISR_d;
hipMalloc((void **)&ISR_d, (Q_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( i_sm_binary_search), dim3(dimGrid), dim3(dimBlock), I_size * sizeof(unsigned int) , 0,
D_d, D_size, Q_d, Q_size, ISR_d, I_d, I_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_smindex_1_time = report();
unsigned int *ISR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
hipMemcpy(ISR_h, ISR_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
hipFree(ISR_d);
//}}}
//{{{ tree
int tree_grid_size = ( T_size + block_size - 1) / (block_size * 1);
dim3 tree_dimGrid( tree_grid_size );
unsigned int *T_d;
hipMalloc((void **)&T_d, (T_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( gen_tree) , dim3(tree_dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, T_d, T_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "tree: %s.\n", hipGetErrorString( err) );
stop();
unsigned long tree_time = report();
unsigned int *T_h = (unsigned int *)malloc(
T_size * sizeof(unsigned int));
hipMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
//}}}
//{{{ t_gm_binary_search
unsigned int *TGR_d;
hipMalloc((void **)&TGR_d, (Q_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( t_gm_binary_search), dim3(dimGrid),
dim3( dimBlock), 0, 0,
D_d, D_size, Q_d, Q_size, TGR_d, T_d, T_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_gmtree_1_time = report();
/* DEBUG START */
/*
unsigned int *R_h = (unsigned int *) malloc(Q_size*sizeof(unsigned int));
hipMemcpy(R_h, R_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
unsigned int *Q_h = (unsigned int *) malloc(Q_size*sizeof(unsigned int));
hipMemcpy(Q_h, Q_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
unsigned int *T_h = (unsigned int *) malloc(T_size*sizeof(unsigned int));
hipMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
int x;
for (x = 0; x < Q_size; x++)
printf("%d\t%u\t%u\n", x, Q_h[x], R_h[x]);
for (x = 0; x < T_size; x++)
printf("%d\t%u\n", x, T_h[x]);
*/
/* DEBUG END */
unsigned int *TGR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
hipMemcpy(TGR_h, TGR_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
hipFree(TGR_d);
//}}}
//{{{ t_gm_binary_search
unsigned int *TSR_d;
hipMalloc((void **)&TSR_d, (Q_size)*sizeof(unsigned int));
start();
hipLaunchKernelGGL(( t_sm_binary_search), dim3(dimGrid),
dim3( dimBlock),
T_size * sizeof(unsigned int) , 0,
D_d, D_size, Q_d, Q_size, TSR_d, T_d, T_size);
hipDeviceSynchronize();
err = hipGetLastError();
if(err != hipSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", hipGetErrorString( err) );
stop();
unsigned long search_smtree_1_time = report();
unsigned int *TSR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
hipMemcpy(TSR_h, TSR_d, (Q_size) * sizeof(unsigned int),
hipMemcpyDeviceToHost);
hipFree(TSR_d);
//}}}
int *I = (int *) malloc(I_size * sizeof(int));
int i;
for (i = 0; i < Q_size; i++) {
if (BR_h[i] != IGR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != ISR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != TGR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != TSR_h[i])
printf(".");
else
printf("-");
printf("\t");
/*
if ( (BR_h[i] != IGR_h[i]) ||
(BR_h[i] != ISR_h[i]) ||
(BR_h[i] != TGR_h[i]) ||
(BR_h[i] != TSR_h[i]) )
printf("-\t");
else
printf("+\t");
*/
printf( "%d\t"
"%u\t"
"q:%u\t"
"b:%u\t"
"ig:%u\t"
"is:%u\t"
"tg:%u\t"
"ts:%u\n",
i,
D_h[ BR_h[i] ],
Q_h[i],
BR_h[i],
IGR_h[i],
ISR_h[i],
TGR_h[i],
TSR_h[i]
);
/*
if ( D_h[ BR_h[i] ] == Q_h[i] )
printf( "=\t%d\t"
"d:%u\t"
"q:%u\t"
"b:%u\t"
"ig:%u\t"
"is:%u\t"
"tg:%u\t"
"ts:%u\n",
i,
D_h[ BR_h[i] ],
Q_h[i],
BR_h[i],
IGR_h[i],
ISR_h[i],
TGR_h[i],
TSR_h[i]
);
*/
}
/*
for (i = 0; i < I_size - 1; i++)
printf("I\t%d\t%d\t%u\n", i, _i_to_I(i,I_size,D_size),I_h[i]);
*/
return 0;
}
| 4272bf1982b35885d0d5bffb6699e175ac56452c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cutil.h>
#include <sys/time.h>
#include "radixsort.h"
#include "random.hpp"
#include "timer.h"
#include "bsearch_cuda.h"
int main(int argc, char *argv[]) {
if (argc < 4) {
fprintf(stderr, "usage: %s <D size> <Q size> <I/T Size>"
"<seed> <device>\n",
argv[0]);
return 1;
}
CUDA_SAFE_CALL( cudaSetDevice( atoi(argv[5] ) ) );
//CUDA_SAFE_CALL( cudaFree(NULL) );
int D_size = atoi(argv[1]);
int Q_size = atoi(argv[2]);
int I_size = atoi(argv[3]);
int T_size = I_size;
int seed = atoi(argv[4]);
cudaError_t err;
//{{{ gen Q and D
RNG_rand48 D_r(seed);
D_r.generate(D_size);
unsigned int *D_d = (unsigned int *)D_r.get_random_numbers();
RNG_rand48 Q_r(seed);
Q_r.generate(Q_size);
unsigned int *Q_d = (unsigned int *)Q_r.get_random_numbers();
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "rand errors: %s.\n", cudaGetErrorString( err) );
//}}}
//{{{ sort D
start();
nvRadixSort::RadixSort sort_D_d(D_size, true);
sort_D_d.sort((unsigned int*)D_d, 0, D_size, 32);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "sort d: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long sort_d_time = report();
//}}}
unsigned int *D_h = (unsigned int *)malloc(
D_size * sizeof(unsigned int));
cudaMemcpy(D_h, D_d, (D_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
unsigned int *Q_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
cudaMemcpy(Q_h, Q_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
int block_size = 256;
dim3 dimBlock(block_size);
int grid_size = ( Q_size + block_size - 1) / (block_size * 1);
dim3 dimGrid( grid_size );
//{{{b_search
unsigned int *R_d;
cudaMalloc((void **)&R_d, (Q_size)*sizeof(unsigned int));
start();
b_search <<<dimGrid, dimBlock>>> (D_d, D_size, Q_d, Q_size, R_d);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "b_search: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_noindex_1_time = report();
start();
b_search <<<dimGrid, dimBlock>>> (D_d, D_size, Q_d, Q_size, R_d);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "b_search: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_noindex_2_time = report();
unsigned int *BR_h = (unsigned int *)malloc( Q_size * sizeof(unsigned int));
cudaMemcpy(BR_h, R_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
cudaFree(R_d);
//}}}
//{{{ index
int index_grid_size = ( I_size + block_size - 1) / (block_size * 1);
dim3 index_dimGrid( index_grid_size );
unsigned int *I_d;
cudaMalloc((void **)&I_d, (I_size)*sizeof(unsigned int));
start();
gen_index <<<index_dimGrid, dimBlock>>> ( D_d, D_size, I_d, I_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "index: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long index_time = report();
unsigned int *I_h = (unsigned int *)malloc(
I_size * sizeof(unsigned int));
cudaMemcpy(I_h, I_d, (I_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
//}}}
//{{{ i_gm_binary_search
unsigned int *IGR_d;
cudaMalloc((void **)&IGR_d, (Q_size)*sizeof(unsigned int));
start();
i_gm_binary_search<<< dimGrid, dimBlock>>> (
D_d, D_size, Q_d, Q_size, IGR_d, I_d, I_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "binary_search_gp 1: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_gmindex_1_time = report();
unsigned int *IGR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
cudaMemcpy(IGR_h, IGR_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
cudaFree(IGR_d);
//}}}
//{{{ i_sm_binary_search
unsigned int *ISR_d;
cudaMalloc((void **)&ISR_d, (Q_size)*sizeof(unsigned int));
start();
i_sm_binary_search<<< dimGrid, dimBlock, I_size * sizeof(unsigned int) >>> (
D_d, D_size, Q_d, Q_size, ISR_d, I_d, I_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_smindex_1_time = report();
unsigned int *ISR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
cudaMemcpy(ISR_h, ISR_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
cudaFree(ISR_d);
//}}}
//{{{ tree
int tree_grid_size = ( T_size + block_size - 1) / (block_size * 1);
dim3 tree_dimGrid( tree_grid_size );
unsigned int *T_d;
cudaMalloc((void **)&T_d, (T_size)*sizeof(unsigned int));
start();
gen_tree <<<tree_dimGrid, dimBlock>>> ( D_d, D_size, T_d, T_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "tree: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long tree_time = report();
unsigned int *T_h = (unsigned int *)malloc(
T_size * sizeof(unsigned int));
cudaMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
//}}}
//{{{ t_gm_binary_search
unsigned int *TGR_d;
cudaMalloc((void **)&TGR_d, (Q_size)*sizeof(unsigned int));
start();
t_gm_binary_search<<< dimGrid,
dimBlock>>> (
D_d, D_size, Q_d, Q_size, TGR_d, T_d, T_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_gmtree_1_time = report();
/* DEBUG START */
/*
unsigned int *R_h = (unsigned int *) malloc(Q_size*sizeof(unsigned int));
cudaMemcpy(R_h, R_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
unsigned int *Q_h = (unsigned int *) malloc(Q_size*sizeof(unsigned int));
cudaMemcpy(Q_h, Q_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
unsigned int *T_h = (unsigned int *) malloc(T_size*sizeof(unsigned int));
cudaMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
int x;
for (x = 0; x < Q_size; x++)
printf("%d\t%u\t%u\n", x, Q_h[x], R_h[x]);
for (x = 0; x < T_size; x++)
printf("%d\t%u\n", x, T_h[x]);
*/
/* DEBUG END */
unsigned int *TGR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
cudaMemcpy(TGR_h, TGR_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
cudaFree(TGR_d);
//}}}
//{{{ t_gm_binary_search
unsigned int *TSR_d;
cudaMalloc((void **)&TSR_d, (Q_size)*sizeof(unsigned int));
start();
t_sm_binary_search<<< dimGrid,
dimBlock,
T_size * sizeof(unsigned int) >>> (
D_d, D_size, Q_d, Q_size, TSR_d, T_d, T_size);
cudaThreadSynchronize();
err = cudaGetLastError();
if(err != cudaSuccess)
fprintf(stderr, "binary_search_gp: %s.\n", cudaGetErrorString( err) );
stop();
unsigned long search_smtree_1_time = report();
unsigned int *TSR_h = (unsigned int *)malloc(
Q_size * sizeof(unsigned int));
cudaMemcpy(TSR_h, TSR_d, (Q_size) * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
cudaFree(TSR_d);
//}}}
int *I = (int *) malloc(I_size * sizeof(int));
int i;
for (i = 0; i < Q_size; i++) {
if (BR_h[i] != IGR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != ISR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != TGR_h[i])
printf(".");
else
printf("-");
if (BR_h[i] != TSR_h[i])
printf(".");
else
printf("-");
printf("\t");
/*
if ( (BR_h[i] != IGR_h[i]) ||
(BR_h[i] != ISR_h[i]) ||
(BR_h[i] != TGR_h[i]) ||
(BR_h[i] != TSR_h[i]) )
printf("-\t");
else
printf("+\t");
*/
printf( "%d\t"
"%u\t"
"q:%u\t"
"b:%u\t"
"ig:%u\t"
"is:%u\t"
"tg:%u\t"
"ts:%u\n",
i,
D_h[ BR_h[i] ],
Q_h[i],
BR_h[i],
IGR_h[i],
ISR_h[i],
TGR_h[i],
TSR_h[i]
);
/*
if ( D_h[ BR_h[i] ] == Q_h[i] )
printf( "=\t%d\t"
"d:%u\t"
"q:%u\t"
"b:%u\t"
"ig:%u\t"
"is:%u\t"
"tg:%u\t"
"ts:%u\n",
i,
D_h[ BR_h[i] ],
Q_h[i],
BR_h[i],
IGR_h[i],
ISR_h[i],
TGR_h[i],
TSR_h[i]
);
*/
}
/*
for (i = 0; i < I_size - 1; i++)
printf("I\t%d\t%d\t%u\n", i, _i_to_I(i,I_size,D_size),I_h[i]);
*/
return 0;
}
|
65d34fe185036ff0bf2e29587f072c49ec71bf6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/fake_quantization.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// fakeQuantWithMinMaxVars_
// input - input tensor
// min - min scalar tensor
// max - max scalar tensor
// numBits - (default 16bit)
// narrowed - shrink is true
// output - output tensor
//
template <typename T>
static __host__ __device__ void
nudge(T min, T max, int quantMin, int quantMax, T* scale, T* nudgedMin, T* nudgedMax) {
T quantMaxF = static_cast<T>(quantMax);
T quantMinF = static_cast<T>(quantMin);
*scale = (max - min) / (quantMaxF - quantMinF);
auto zeroPointFromMin = quantMinF - min / *scale;
uint16_t const nudgedZeroPoint = [zeroPointFromMin, quantMin, quantMax, quantMaxF, quantMinF] {
if (zeroPointFromMin < quantMinF) {
return static_cast<uint16_t>(quantMin);
}
if (zeroPointFromMin > quantMaxF) {
return static_cast<uint16_t>(quantMax);
}
return nd4j::math::nd4j_round<T,uint16_t>(zeroPointFromMin);
}();
*nudgedMax = (quantMaxF - static_cast<T>(nudgedZeroPoint)) * (*scale);
*nudgedMin = (quantMinF - static_cast<T>(nudgedZeroPoint)) * (*scale);
}
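    // Worked example (added for illustration): min = -0.9, max = 1.1,
    // numBits = 8, narrowed = false => quantMin = 0, quantMax = 255,
    // scale = 2/255 ~= 0.00784, zeroPointFromMin = 0.9/0.00784 = 114.75,
    // nudgedZeroPoint = 115, nudgedMin ~= -0.902, nudgedMax ~= 1.098 --
    // the [min, max] range is nudged so that 0 falls exactly on a
    // quantization level.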
template <typename T>
void fakeQuantWithMinMaxVars_(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
int lowIntBound = narrowed?1:0;
int upperIntBound = (1 << numBits) - 1;
min->syncToHost(); // these are scalars, so nothing much happened
max->syncToHost();
T scale, nudgedMin, nudgedMax;
nudge(min->t<T>(0), max->t<T>(0), lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax);
auto wiseMinMaxAndSoOn = LAMBDA_T(x, nudgedMin, nudgedMax, scale) {
T val = x;
if (x < nudgedMin) {
val = nudgedMin;
}
else if (x > nudgedMax) {
val = nudgedMax;
}
else
val = x;
return (math::nd4j_floor<T,T>((val - nudgedMin) / scale + T(0.5)) * scale + nudgedMin);
};
input->applyLambda(wiseMinMaxAndSoOn, *output);
}
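// Per-channel variant: min/max hold one value per channel and the channel axis is
// assumed to be the innermost dimension, so element b * channels + i is the b-th
// entry of channel i; blocks stride over channels, threads over per-channel elements.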
template <typename T>
static __global__ void fakeQuantWithMinMaxKernel(T* input, Nd4jLong* inputShape, T* min, T* max,
int lowIntBound, int upperIntBound, Nd4jLong channels,
T* output, Nd4jLong* outputShape, Nd4jLong length) {
__shared__ int block;
if (threadIdx.x == 0) {
block = length / channels; // to loop with last dimension as block
}
__syncthreads();
for (auto i = blockIdx.x; i < (int)channels; i += gridDim.x) {
T scale, nudgedMin, nudgedMax;
nudge(min[i], max[i], lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax);
// loop over blocks to apply quantization between the nudged min and max
for (auto b = threadIdx.x; b < block; b += blockDim.x) {
T val = input[shape::getIndexOffset(b * channels + i, inputShape)];
if (val < nudgedMin) {
val = nudgedMin;
} else if (val > nudgedMax) {
val = nudgedMax;
}
output[shape::getIndexOffset(b * channels + i, outputShape)] =
(math::nd4j_floor<T, T>((val - nudgedMin) / scale + T(0.5f)) * scale + nudgedMin);
};
}
}
template <typename T>
void fakeQuantWithMinMaxVarsPerChannel_(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
int lowIntBound = narrowed?1:0;
int upperIntBound = (1 << numBits) - 1;
auto channels = min->lengthOf();
auto length = input->lengthOf();
NDArray::prepareSpecialUse({output}, {min, max, input});
auto stream = context->getCudaStream();
T* inputBuf = input->dataBuffer()->specialAsT<T>();
T* outputBuf = output->dataBuffer()->specialAsT<T>();
T* minBuf = min->dataBuffer()->specialAsT<T>();
T* maxBuf = max->dataBuffer()->specialAsT<T>();
hipLaunchKernelGGL(( fakeQuantWithMinMaxKernel), dim3(128), dim3(256), 256, *stream, inputBuf, input->specialShapeInfo(),
minBuf, maxBuf, lowIntBound, upperIntBound, channels, outputBuf, output->specialShapeInfo(), length);
NDArray::registerSpecialUse({output}, {min, max, input});
}
void fakeQuantWithMinMaxVars(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVars_, (input, min, max, numBits, narrowed, output), FLOAT_TYPES);
}
void fakeQuantWithMinMaxVarsPerChannel(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVarsPerChannel_, (context, input, min, max, numBits, narrowed, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void fakeQuantWithMinMaxVars_, (NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void fakeQuantWithMinMaxVarsPerChannel_, (LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output), FLOAT_TYPES);
}
}
}
| 65d34fe185036ff0bf2e29587f072c49ec71bf6e.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/fake_quantization.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// fakeQuantWithMinMaxVars_
// input - input tensor
// min - min scalar tensor
// max - max scalar tensor
// numBits - (default 16bit)
// narrowed - if true, narrow the integer range (quantMin becomes 1 instead of 0)
// output - output tensor
//
template <typename T>
static __host__ __device__ void
nudge(T min, T max, int quantMin, int quantMax, T* scale, T* nudgedMin, T* nudgedMax) {
T quantMaxF = static_cast<T>(quantMax);
T quantMinF = static_cast<T>(quantMin);
*scale = (max - min) / (quantMaxF - quantMinF);
auto zeroPointFromMin = quantMinF - min / *scale;
uint16_t const nudgedZeroPoint = [zeroPointFromMin, quantMin, quantMax, quantMaxF, quantMinF] {
if (zeroPointFromMin < quantMinF) {
return static_cast<uint16_t>(quantMin);
}
if (zeroPointFromMin > quantMaxF) {
return static_cast<uint16_t>(quantMax);
}
return nd4j::math::nd4j_round<T,uint16_t>(zeroPointFromMin);
}();
*nudgedMax = (quantMaxF - static_cast<T>(nudgedZeroPoint)) * (*scale);
*nudgedMin = (quantMinF - static_cast<T>(nudgedZeroPoint)) * (*scale);
}
template <typename T>
void fakeQuantWithMinMaxVars_(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
int lowIntBound = narrowed?1:0;
int upperIntBound = (1 << numBits) - 1;
min->syncToHost(); // these are scalars, so nothing much happened
max->syncToHost();
T scale, nudgedMin, nudgedMax;
nudge(min->t<T>(0), max->t<T>(0), lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax);
auto wiseMinMaxAndSoOn = LAMBDA_T(x, nudgedMin, nudgedMax, scale) {
T val = x;
if (x < nudgedMin) {
val = nudgedMin;
}
else if (x > nudgedMax) {
val = nudgedMax;
}
else
val = x;
return (math::nd4j_floor<T,T>((val - nudgedMin) / scale + T(0.5)) * scale + nudgedMin);
};
input->applyLambda(wiseMinMaxAndSoOn, *output);
}
template <typename T>
static __global__ void fakeQuantWithMinMaxKernel(T* input, Nd4jLong* inputShape, T* min, T* max,
int lowIntBound, int upperIntBound, Nd4jLong channels,
T* output, Nd4jLong* outputShape, Nd4jLong length) {
__shared__ int block;
if (threadIdx.x == 0) {
block = length / channels; // to loop with last dimension as block
}
__syncthreads();
for (auto i = blockIdx.x; i < (int)channels; i += gridDim.x) {
T scale, nudgedMin, nudgedMax;
nudge(min[i], max[i], lowIntBound, upperIntBound, &scale, &nudgedMin, &nudgedMax);
// loop over blocks to apply quantization between the nudged min and max
for (auto b = threadIdx.x; b < block; b += blockDim.x) {
T val = input[shape::getIndexOffset(b * channels + i, inputShape)];
if (val < nudgedMin) {
val = nudgedMin;
} else if (val > nudgedMax) {
val = nudgedMax;
}
output[shape::getIndexOffset(b * channels + i, outputShape)] =
(math::nd4j_floor<T, T>((val - nudgedMin) / scale + T(0.5f)) * scale + nudgedMin);
};
}
}
template <typename T>
void fakeQuantWithMinMaxVarsPerChannel_(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
int lowIntBound = narrowed?1:0;
int upperIntBound = (1 << numBits) - 1;
auto channels = min->lengthOf();
auto length = input->lengthOf();
NDArray::prepareSpecialUse({output}, {min, max, input});
auto stream = context->getCudaStream();
T* inputBuf = input->dataBuffer()->specialAsT<T>();
T* outputBuf = output->dataBuffer()->specialAsT<T>();
T* minBuf = min->dataBuffer()->specialAsT<T>();
T* maxBuf = max->dataBuffer()->specialAsT<T>();
fakeQuantWithMinMaxKernel<<<128, 256, 256, *stream>>>(inputBuf, input->specialShapeInfo(),
minBuf, maxBuf, lowIntBound, upperIntBound, channels, outputBuf, output->specialShapeInfo(), length);
NDArray::registerSpecialUse({output}, {min, max, input});
}
void fakeQuantWithMinMaxVars(NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVars_, (input, min, max, numBits, narrowed, output), FLOAT_TYPES);
}
void fakeQuantWithMinMaxVarsPerChannel(LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), fakeQuantWithMinMaxVarsPerChannel_, (context, input, min, max, numBits, narrowed, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void fakeQuantWithMinMaxVars_, (NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void fakeQuantWithMinMaxVarsPerChannel_, (LaunchContext* context, NDArray* input, NDArray* min, NDArray* max, int numBits, bool narrowed, NDArray* output), FLOAT_TYPES);
}
}
}
|
a59a90a06abcf0e521389ad69f77dec38b5cff1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "Measures.cuh"
#include "Jacobian_hip.cuh"
/******************************************************************************
** (Invariant) Measures:
******************************************************************************/
__device__ inline float3 getCosines(float arg)
{
float acs = acosf(arg);
return make_float3(cosf(acs / 3.0f), cosf(acs / 3.0f + 2.0f / 3.0f * TUM3D_PI), cosf(acs / 3.0f + 4.0f / 3.0f * TUM3D_PI));
}
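// getLambda2 evaluates the lambda-2 vortex criterion: s00..s22 below form the symmetric
// tensor S^2 + Omega^2 (the symmetric part of J^2), whose characteristic cubic is solved
// analytically and whose middle eigenvalue is returned; lambda2 < 0 marks a vortex core.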
__device__ float getLambda2(const float3x3& J)
{
float s01 = 0.5 * ((J.m[0].x + J.m[1].y) * (J.m[0].y + J.m[1].x) + J.m[0].z * J.m[2].y + J.m[1].z * J.m[2].x);
float s02 = 0.5 * ((J.m[0].x + J.m[2].z) * (J.m[0].z + J.m[2].x) + J.m[0].y * J.m[1].z + J.m[2].y * J.m[1].x);
float s12 = 0.5 * ((J.m[1].y + J.m[2].z) * (J.m[1].z + J.m[2].y) + J.m[1].x * J.m[0].z + J.m[2].x * J.m[0].y);
float s00 = J.m[0].x * J.m[0].x + J.m[0].y * J.m[1].x + J.m[0].z * J.m[2].x;
float s11 = J.m[1].x * J.m[0].y + J.m[1].y * J.m[1].y + J.m[1].z * J.m[2].y;
float s22 = J.m[2].x * J.m[0].z + J.m[2].y * J.m[1].z + J.m[2].z * J.m[2].z;
float b = +s00 + s11 + s22;
float c = -s00 * (s11 + s22) - s11 * s22 + s12 * s12 + s01 * s01 + s02 * s02;
float d = s00 * (s11 * s22 - s12 * s12) + 2.0 * s01 * s12 * s02 - s02 * s02 * s11 - s01 * s01 * s22;
const float onethird = 1.0f / 3.0f;
float xN = b * onethird;
float yN = d + xN * (c + xN * (b - xN));
float deltasqr = xN * xN + c * onethird;
float delta = -getSign(yN) * sqrt(deltasqr);
float hsqr = 4.0f * deltasqr * deltasqr * deltasqr;
float h = -2.0f * delta * deltasqr;
float yNsqr = yN * yN;
float lambda2;
if (yNsqr > hsqr)
{
float D = sqrt(yNsqr - hsqr);
lambda2 = xN + getSign(yN - D) * powf(0.5f * fabsf(yN - D), onethird)
+ getSign(yN + D) * powf(0.5f * fabsf(yN + D), onethird);
}
else if (yNsqr < hsqr)
{
float3 L = xN + 2.0 * delta * getCosines(-yN / h);
lambda2 = fminf(fmaxf(fminf(L.x, L.y), L.z), fmaxf(L.x, L.y));
}
else
{
if (h == 0.0f) lambda2 = xN;
else lambda2 = xN + delta;
}
return lambda2;
}
__device__ float getQHunt(const float3x3& jacobian)
{
float3x3 S = getStrainRateTensor(jacobian);
float3x3 O = getSpinTensor(jacobian);
float fS = FrobeniusNorm3x3(S);
float fO = FrobeniusNorm3x3(O);
return (0.5 * (fO * fO - fS * fS));
}
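// Delta criterion: with Q = -0.5 * tr(S^2 + Omega^2) and R = det(J), the value
// (Q/3)^3 + (R/2)^2 is the discriminant of the characteristic cubic of J (assuming
// incompressible flow, tr(J) = 0); it is positive where J has complex eigenvalues,
// i.e. locally swirling motion.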
__device__ float getDeltaChong(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3x3 O = getSpinTensor(J);
float3x3 SS = multMat3x3(S, S);
float3x3 OO = multMat3x3(O, O);
float3x3 SSpOO = addMat3x3(SS, OO);
float Q = -0.5f * Trace3x3(SSpOO);
//float Q = getQHunt(J);
float R = Det3x3(J);
Q /= 3.0f;
R /= 2.0f;
return (Q * Q * Q + R * R);
}
__device__ float getSquareRotation(const float3x3& J)
{
float3x3 O = getSpinTensor(J);
float3x3 Osq = multMat3x3(O, O);
return float(-0.5 * Trace3x3(Osq));
}
__device__ float getTurbulentViscosity(const float3x3& J, float3 v)
{
float3x3 S = getStrainRateTensor(J);
// kinetic dissipation rate
float kdr = 2.0f * sqrtf(0.001f / 100000.0f) *
(
S.m[0].x * S.m[0].x + S.m[0].y * S.m[0].y + S.m[0].z * S.m[0].z +
S.m[1].x * S.m[1].x + S.m[1].y * S.m[1].y + S.m[0].z * S.m[1].z +
S.m[1].x * S.m[1].x + S.m[1].y * S.m[1].y + S.m[1].z * S.m[1].z
);
// Kinetic Turbulent Energy
float kte = 0.5f * dot(v, v);
return ((kte * kte) / kdr);
}
__device__ float getEnstrophyProduction(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3 w = getVorticity(J);
float e = S.m[0].x * w.x * w.x +
S.m[0].y * w.x * w.y +
S.m[0].z * w.x * w.z +
S.m[1].x * w.y * w.x +
S.m[1].y * w.y * w.y +
S.m[1].z * w.y * w.z +
S.m[2].x * w.z * w.x +
S.m[2].y * w.z * w.y +
S.m[2].z * w.z * w.z;
return e;
}
__device__ float getStrainProduction(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float e = S.m[0].x * S.m[0].x * S.m[0].x + S.m[0].x * S.m[0].y * S.m[1].x + S.m[0].x * S.m[0].z * S.m[2].x +
S.m[0].y * S.m[1].x * S.m[0].x + S.m[0].y * S.m[1].y * S.m[1].x + S.m[0].y * S.m[1].z * S.m[2].x +
S.m[0].z * S.m[2].x * S.m[0].x + S.m[0].z * S.m[2].y * S.m[1].x + S.m[0].z * S.m[2].z * S.m[2].x +
S.m[1].x * S.m[0].x * S.m[0].y + S.m[1].x * S.m[0].y * S.m[1].y + S.m[1].x * S.m[0].z * S.m[2].y +
S.m[1].y * S.m[1].x * S.m[0].y + S.m[1].y * S.m[1].y * S.m[1].y + S.m[1].y * S.m[1].z * S.m[2].y +
S.m[1].z * S.m[2].x * S.m[0].y + S.m[1].z * S.m[2].y * S.m[1].y + S.m[1].z * S.m[2].z * S.m[2].y +
S.m[2].x * S.m[0].x * S.m[0].z + S.m[2].x * S.m[0].y * S.m[1].z + S.m[2].x * S.m[0].z * S.m[2].z +
S.m[2].y * S.m[1].x * S.m[0].z + S.m[2].y * S.m[1].y * S.m[1].z + S.m[2].y * S.m[1].z * S.m[2].z +
S.m[2].z * S.m[2].x * S.m[0].z + S.m[2].z * S.m[2].y * S.m[1].z + S.m[2].z * S.m[2].z * S.m[2].z;
return e;
}
// SquareRateOfStrain == Q_S in the paper except for a factor -0.5
__device__ float getSquareRateOfStrain(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3x3 Ssq = multMat3x3(S, S);
return Trace3x3(Ssq);
}
/***********************************************************************************************
* Eigensolver by Hasan et al.
* additional sorting of the eigenvalues (no positive definite tensor)
***********************************************************************************************/
__device__ inline void sort3Items(float3& v)
{
float t;
if (v.y < v.x)
{
t = v.x;
if (v.z < v.y) { v.x = v.z; v.z = t; }
else
{
if (v.z < t) { v.x = v.y; v.y = v.z; v.z = t; }
else { v.x = v.y; v.y = t; }
}
}
else
{
if (v.z < v.y)
{
t = v.z;
v.z = v.y;
if (t < v.x) { v.y = v.x; v.x = t; }
else { v.y = t; }
}
}
}
__device__ inline float3 getOrthonormalEigenvector(const float& eigenValue, const float3& vDiag, const float3& vOffDiag)
{
float3 vABC = make_float3(vDiag.x - eigenValue, vDiag.y - eigenValue, vDiag.z - eigenValue);
return normalize(make_float3((vOffDiag.x * vOffDiag.z - vABC.y * vOffDiag.y) * (vOffDiag.y * vOffDiag.z - vABC.z * vOffDiag.x),
-(vOffDiag.y * vOffDiag.z - vABC.z * vOffDiag.x) * (vOffDiag.y * vOffDiag.x - vABC.x * vOffDiag.z),
(vOffDiag.x * vOffDiag.z - vABC.y * vOffDiag.y) * (vOffDiag.y * vOffDiag.x - vABC.x * vOffDiag.z)));
}
__device__ inline void eigensolveHasan(const float3x3& J, float3& sortedEigenvalues, float3& eigenVector1, float3& eigenVector2, float3& eigenVector3)
{
const float3 vOne = make_float3(1, 1, 1);
float3 vDiag = make_float3(J.m[0].x, J.m[1].y, J.m[2].z); // xx , yy , zz
float3 vOffDiag = make_float3(J.m[0].y, J.m[0].z, J.m[1].z); // xy , xz , yz
float3 offSq = vOffDiag * vOffDiag;
float I1 = dot(vDiag, vOne);
float I2 = dot(make_float3(vDiag.x, vDiag.x, vDiag.y), make_float3(vDiag.y, vDiag.z, vDiag.z)) - dot(offSq, vOne);
float I3 = vDiag.x * vDiag.y * vDiag.z + 2.0f * vOffDiag.x * vOffDiag.y * vOffDiag.z - dot(make_float3(vDiag.z, vDiag.y, vDiag.x), offSq);
float I1_3 = I1 / 3.0f;
float I1_3Sq = I1_3 * I1_3;
float v = I1_3Sq - I2 / 3.0f;
float vInv = 1.0f / v;
float s = I1_3Sq * I1_3 - I1 * I2 / 6.0f + I3 / 2.0f;
float phi = acosf(s * vInv * sqrt(vInv)) / 3.0f;
float vSqrt2 = 2.0f * sqrt(v);
sortedEigenvalues = make_float3(I1_3 + vSqrt2 * cosf(phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) + phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) - phi));
sort3Items(sortedEigenvalues);
eigenVector1 = getOrthonormalEigenvector(sortedEigenvalues.x, vDiag, vOffDiag);
eigenVector2 = getOrthonormalEigenvector(sortedEigenvalues.y, vDiag, vOffDiag);
eigenVector3 = cross(eigenVector1, eigenVector2);
}
/***********************************************************************************************
* Preferential vorticity alignment (cosine of the angle between the second largest eigenvector
* of the strain rate tensor and the vorticity)
***********************************************************************************************/
__device__ float getPVA(const float3x3& J)
{
const float3 vOne = make_float3(1, 1, 1);
float3 vDiag = make_float3(J.m[0].x, J.m[1].y, J.m[2].z); // xx , yy , zz
float3 vOffDiag = make_float3(J.m[0].y, J.m[0].z, J.m[1].z); // xy , xz , yz
float3 offSq = vOffDiag * vOffDiag;
float I1 = dot(vDiag, vOne);
float I2 = dot(make_float3(vDiag.x, vDiag.x, vDiag.y), make_float3(vDiag.y, vDiag.z, vDiag.z)) - dot(offSq, vOne);
float I3 = vDiag.x * vDiag.y * vDiag.z + 2.0f * vOffDiag.x * vOffDiag.y * vOffDiag.z - dot(make_float3(vDiag.z, vDiag.y, vDiag.x), offSq);
float I1_3 = I1 / 3.0f;
float I1_3Sq = I1_3 * I1_3;
float v = I1_3Sq - I2 / 3.0f;
float vInv = 1.0f / v;
float s = I1_3Sq * I1_3 - I1 * I2 / 6.0f + I3 / 2.0f;
float phi = acosf(s * vInv * sqrt(vInv)) / 3.0f;
float vSqrt2 = 2.0f * sqrt(v);
float3 sortedEigenvalues = make_float3(I1_3 + vSqrt2 * cosf(phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) + phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) - phi));
sort3Items(sortedEigenvalues);
float3 e2 = getOrthonormalEigenvector(sortedEigenvalues.y, vDiag, vOffDiag);
float3 vorticity = getVorticity(J);
return abs(dot(e2, vorticity));
}
| a59a90a06abcf0e521389ad69f77dec38b5cff1d.cu | #include "Measures.cuh"
#include "Jacobian.cuh"
/******************************************************************************
** (Invariant) Measures:
******************************************************************************/
__device__ inline float3 getCosines(float arg)
{
float acs = acosf(arg);
return make_float3(cosf(acs / 3.0f), cosf(acs / 3.0f + 2.0f / 3.0f * TUM3D_PI), cosf(acs / 3.0f + 4.0f / 3.0f * TUM3D_PI));
}
__device__ float getLambda2(const float3x3& J)
{
float s01 = 0.5 * ((J.m[0].x + J.m[1].y) * (J.m[0].y + J.m[1].x) + J.m[0].z * J.m[2].y + J.m[1].z * J.m[2].x);
float s02 = 0.5 * ((J.m[0].x + J.m[2].z) * (J.m[0].z + J.m[2].x) + J.m[0].y * J.m[1].z + J.m[2].y * J.m[1].x);
float s12 = 0.5 * ((J.m[1].y + J.m[2].z) * (J.m[1].z + J.m[2].y) + J.m[1].x * J.m[0].z + J.m[2].x * J.m[0].y);
float s00 = J.m[0].x * J.m[0].x + J.m[0].y * J.m[1].x + J.m[0].z * J.m[2].x;
float s11 = J.m[1].x * J.m[0].y + J.m[1].y * J.m[1].y + J.m[1].z * J.m[2].y;
float s22 = J.m[2].x * J.m[0].z + J.m[2].y * J.m[1].z + J.m[2].z * J.m[2].z;
float b = +s00 + s11 + s22;
float c = -s00 * (s11 + s22) - s11 * s22 + s12 * s12 + s01 * s01 + s02 * s02;
float d = s00 * (s11 * s22 - s12 * s12) + 2.0 * s01 * s12 * s02 - s02 * s02 * s11 - s01 * s01 * s22;
const float onethird = 1.0f / 3.0f;
float xN = b * onethird;
float yN = d + xN * (c + xN * (b - xN));
float deltasqr = xN * xN + c * onethird;
float delta = -getSign(yN) * sqrt(deltasqr);
float hsqr = 4.0f * deltasqr * deltasqr * deltasqr;
float h = -2.0f * delta * deltasqr;
float yNsqr = yN * yN;
float lambda2;
if (yNsqr > hsqr)
{
float D = sqrt(yNsqr - hsqr);
lambda2 = xN + getSign(yN - D) * powf(0.5f * fabsf(yN - D), onethird)
+ getSign(yN + D) * powf(0.5f * fabsf(yN + D), onethird);
}
else if (yNsqr < hsqr)
{
float3 L = xN + 2.0 * delta * getCosines(-yN / h);
lambda2 = fminf(fmaxf(fminf(L.x, L.y), L.z), fmaxf(L.x, L.y));
}
else
{
if (h == 0.0f) lambda2 = xN;
else lambda2 = xN + delta;
}
return lambda2;
}
__device__ float getQHunt(const float3x3& jacobian)
{
float3x3 S = getStrainRateTensor(jacobian);
float3x3 O = getSpinTensor(jacobian);
float fS = FrobeniusNorm3x3(S);
float fO = FrobeniusNorm3x3(O);
return (0.5 * (fO * fO - fS * fS));
}
__device__ float getDeltaChong(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3x3 O = getSpinTensor(J);
float3x3 SS = multMat3x3(S, S);
float3x3 OO = multMat3x3(O, O);
float3x3 SSpOO = addMat3x3(SS, OO);
float Q = -0.5f * Trace3x3(SSpOO);
//float Q = getQHunt(J);
float R = Det3x3(J);
Q /= 3.0f;
R /= 2.0f;
return (Q * Q * Q + R * R);
}
__device__ float getSquareRotation(const float3x3& J)
{
float3x3 O = getSpinTensor(J);
float3x3 Osq = multMat3x3(O, O);
return float(-0.5 * Trace3x3(Osq));
}
__device__ float getTurbulentViscosity(const float3x3& J, float3 v)
{
float3x3 S = getStrainRateTensor(J);
// kinetic dissipation rate
float kdr = 2.0f * sqrtf(0.001f / 100000.0f) *
(
S.m[0].x * S.m[0].x + S.m[0].y * S.m[0].y + S.m[0].z * S.m[0].z +
S.m[1].x * S.m[1].x + S.m[1].y * S.m[1].y + S.m[0].z * S.m[1].z +
S.m[1].x * S.m[1].x + S.m[1].y * S.m[1].y + S.m[1].z * S.m[1].z
);
// Kinetic Turbulent Energy
float kte = 0.5f * dot(v, v);
return ((kte * kte) / kdr);
}
__device__ float getEnstrophyProduction(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3 w = getVorticity(J);
float e = S.m[0].x * w.x * w.x +
S.m[0].y * w.x * w.y +
S.m[0].z * w.x * w.z +
S.m[1].x * w.y * w.x +
S.m[1].y * w.y * w.y +
S.m[1].z * w.y * w.z +
S.m[2].x * w.z * w.x +
S.m[2].y * w.z * w.y +
S.m[2].z * w.z * w.z;
return e;
}
__device__ float getStrainProduction(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float e = S.m[0].x * S.m[0].x * S.m[0].x + S.m[0].x * S.m[0].y * S.m[1].x + S.m[0].x * S.m[0].z * S.m[2].x +
S.m[0].y * S.m[1].x * S.m[0].x + S.m[0].y * S.m[1].y * S.m[1].x + S.m[0].y * S.m[1].z * S.m[2].x +
S.m[0].z * S.m[2].x * S.m[0].x + S.m[0].z * S.m[2].y * S.m[1].x + S.m[0].z * S.m[2].z * S.m[2].x +
S.m[1].x * S.m[0].x * S.m[0].y + S.m[1].x * S.m[0].y * S.m[1].y + S.m[1].x * S.m[0].z * S.m[2].y +
S.m[1].y * S.m[1].x * S.m[0].y + S.m[1].y * S.m[1].y * S.m[1].y + S.m[1].y * S.m[1].z * S.m[2].y +
S.m[1].z * S.m[2].x * S.m[0].y + S.m[1].z * S.m[2].y * S.m[1].y + S.m[1].z * S.m[2].z * S.m[2].y +
S.m[2].x * S.m[0].x * S.m[0].z + S.m[2].x * S.m[0].y * S.m[1].z + S.m[2].x * S.m[0].z * S.m[2].z +
S.m[2].y * S.m[1].x * S.m[0].z + S.m[2].y * S.m[1].y * S.m[1].z + S.m[2].y * S.m[1].z * S.m[2].z +
S.m[2].z * S.m[2].x * S.m[0].z + S.m[2].z * S.m[2].y * S.m[1].z + S.m[2].z * S.m[2].z * S.m[2].z;
return e;
}
// SquareRateOfStrain == Q_S in the paper except for a factor -0.5
__device__ float getSquareRateOfStrain(const float3x3& J)
{
float3x3 S = getStrainRateTensor(J);
float3x3 Ssq = multMat3x3(S, S);
return Trace3x3(Ssq);
}
/***********************************************************************************************
* Eigensolver by Hasan et al.
* additional sorting of the eigenvalues (no positive definite tensor)
***********************************************************************************************/
__device__ inline void sort3Items(float3& v)
{
float t;
if (v.y < v.x)
{
t = v.x;
if (v.z < v.y) { v.x = v.z; v.z = t; }
else
{
if (v.z < t) { v.x = v.y; v.y = v.z; v.z = t; }
else { v.x = v.y; v.y = t; }
}
}
else
{
if (v.z < v.y)
{
t = v.z;
v.z = v.y;
if (t < v.x) { v.y = v.x; v.x = t; }
else { v.y = t; }
}
}
}
__device__ inline float3 getOrthonormalEigenvector(const float& eigenValue, const float3& vDiag, const float3& vOffDiag)
{
float3 vABC = make_float3(vDiag.x - eigenValue, vDiag.y - eigenValue, vDiag.z - eigenValue);
return normalize(make_float3((vOffDiag.x * vOffDiag.z - vABC.y * vOffDiag.y) * (vOffDiag.y * vOffDiag.z - vABC.z * vOffDiag.x),
-(vOffDiag.y * vOffDiag.z - vABC.z * vOffDiag.x) * (vOffDiag.y * vOffDiag.x - vABC.x * vOffDiag.z),
(vOffDiag.x * vOffDiag.z - vABC.y * vOffDiag.y) * (vOffDiag.y * vOffDiag.x - vABC.x * vOffDiag.z)));
}
__device__ inline void eigensolveHasan(const float3x3& J, float3& sortedEigenvalues, float3& eigenVector1, float3& eigenVector2, float3& eigenVector3)
{
const float3 vOne = make_float3(1, 1, 1);
float3 vDiag = make_float3(J.m[0].x, J.m[1].y, J.m[2].z); // xx , yy , zz
float3 vOffDiag = make_float3(J.m[0].y, J.m[0].z, J.m[1].z); // xy , xz , yz
float3 offSq = vOffDiag * vOffDiag;
float I1 = dot(vDiag, vOne);
float I2 = dot(make_float3(vDiag.x, vDiag.x, vDiag.y), make_float3(vDiag.y, vDiag.z, vDiag.z)) - dot(offSq, vOne);
float I3 = vDiag.x * vDiag.y * vDiag.z + 2.0f * vOffDiag.x * vOffDiag.y * vOffDiag.z - dot(make_float3(vDiag.z, vDiag.y, vDiag.x), offSq);
float I1_3 = I1 / 3.0f;
float I1_3Sq = I1_3 * I1_3;
float v = I1_3Sq - I2 / 3.0f;
float vInv = 1.0f / v;
float s = I1_3Sq * I1_3 - I1 * I2 / 6.0f + I3 / 2.0f;
float phi = acosf(s * vInv * sqrt(vInv)) / 3.0f;
float vSqrt2 = 2.0f * sqrt(v);
sortedEigenvalues = make_float3(I1_3 + vSqrt2 * cosf(phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) + phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) - phi));
sort3Items(sortedEigenvalues);
eigenVector1 = getOrthonormalEigenvector(sortedEigenvalues.x, vDiag, vOffDiag);
eigenVector2 = getOrthonormalEigenvector(sortedEigenvalues.y, vDiag, vOffDiag);
eigenVector3 = cross(eigenVector1, eigenVector2);
}
/***********************************************************************************************
* Preferential vorticity alignment (cosine of the angle between the second largest eigenvector
* of the strain rate tensor and the vorticity)
***********************************************************************************************/
__device__ float getPVA(const float3x3& J)
{
const float3 vOne = make_float3(1, 1, 1);
float3 vDiag = make_float3(J.m[0].x, J.m[1].y, J.m[2].z); // xx , yy , zz
float3 vOffDiag = make_float3(J.m[0].y, J.m[0].z, J.m[1].z); // xy , xz , yz
float3 offSq = vOffDiag * vOffDiag;
float I1 = dot(vDiag, vOne);
float I2 = dot(make_float3(vDiag.x, vDiag.x, vDiag.y), make_float3(vDiag.y, vDiag.z, vDiag.z)) - dot(offSq, vOne);
float I3 = vDiag.x * vDiag.y * vDiag.z + 2.0f * vOffDiag.x * vOffDiag.y * vOffDiag.z - dot(make_float3(vDiag.z, vDiag.y, vDiag.x), offSq);
float I1_3 = I1 / 3.0f;
float I1_3Sq = I1_3 * I1_3;
float v = I1_3Sq - I2 / 3.0f;
float vInv = 1.0f / v;
float s = I1_3Sq * I1_3 - I1 * I2 / 6.0f + I3 / 2.0f;
float phi = acosf(s * vInv * sqrt(vInv)) / 3.0f;
float vSqrt2 = 2.0f * sqrt(v);
float3 sortedEigenvalues = make_float3(I1_3 + vSqrt2 * cosf(phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) + phi), I1_3 - vSqrt2 * cosf((TUM3D_PI / 3.0f) - phi));
sort3Items(sortedEigenvalues);
float3 e2 = getOrthonormalEigenvector(sortedEigenvalues.y, vDiag, vOffDiag);
float3 vorticity = getVorticity(J);
return abs(dot(e2, vorticity));
}
|
97c51f14ee1f0f22ece19c753340bab42fb0b93e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/nth_element.h>
#include <TAD.h>
#include <ShapeUtils.h>
#include <PointersManager.h>
#include <NativeOps.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
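// nthElementFunctor sorts the input (as a whole for vectors, per last-dimension TAD
// otherwise) and gathers the n-th value of every sorted slice into the output;
// fillUpElementKernel performs that gather step on the device.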
template <typename T>
static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong* outputShapeInfo, void* inputBuffer, Nd4jLong* inputShapeInfo, Nd4jLong* pTadShape, Nd4jLong* pTadOffsets, Nd4jLong n) {
__shared__ Nd4jLong bufferLength, arrLen;
auto z = reinterpret_cast<T*>(outputBuffer);
auto x = reinterpret_cast<T*>(inputBuffer);
if (threadIdx.x == 0) {
arrLen = shape::length(pTadShape);
bufferLength = shape::length(outputShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < bufferLength; t += step) {
auto tX = x + pTadOffsets[t];
z[shape::getIndexOffset(t, outputShapeInfo, bufferLength)] = tX[shape::getIndexOffset(n, pTadShape, arrLen)]; //tX];
}
}
template <typename T>
void nthElementFunctor_(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
NDArray::prepareSpecialUse({output}, {input});
NDArray sortedVals(*input);
Nd4jPointer params[2];
params[0] = context;
params[1] = context->getCudaStream();
if (input->isVector()) {
sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse);
hipMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), hipMemcpyDeviceToDevice);
}
else { // rank greater than 1
std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(sortedVals.getShapeInfo(), lastDims);
auto pTadShape = packX.specialShapeInfo();
auto pTadShapeH = packX.primaryShapeInfo();
auto pTadOffsets = packX.specialOffsets();
// auto pLastDimData = (int*) manager.replicatePointer(lastDims.data(), lastDims.size() * sizeof(int));
sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse);
// manager.synchronize();
sortedVals.tickWriteDevice();
sortedVals.syncToHost();
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( fillUpElementKernel<T>), dim3(32), dim3(64), 1024, *stream, output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n);
}
NDArray::registerSpecialUse({output}, {input});
}
void nthElementFunctor(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES);
}
}
}
}
| 97c51f14ee1f0f22ece19c753340bab42fb0b93e.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/nth_element.h>
#include <TAD.h>
#include <ShapeUtils.h>
#include <PointersManager.h>
#include <NativeOps.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong* outputShapeInfo, void* inputBuffer, Nd4jLong* inputShapeInfo, Nd4jLong* pTadShape, Nd4jLong* pTadOffsets, Nd4jLong n) {
__shared__ Nd4jLong bufferLength, arrLen;
auto z = reinterpret_cast<T*>(outputBuffer);
auto x = reinterpret_cast<T*>(inputBuffer);
if (threadIdx.x == 0) {
arrLen = shape::length(pTadShape);
bufferLength = shape::length(outputShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (int t = tid; t < bufferLength; t += step) {
auto tX = x + pTadOffsets[t];
z[shape::getIndexOffset(t, outputShapeInfo, bufferLength)] = tX[shape::getIndexOffset(n, pTadShape, arrLen)]; //tX];
}
}
template <typename T>
void nthElementFunctor_(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
NDArray::prepareSpecialUse({output}, {input});
NDArray sortedVals(*input);
Nd4jPointer params[2];
params[0] = context;
params[1] = context->getCudaStream();
if (input->isVector()) {
sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse);
cudaMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), cudaMemcpyDeviceToDevice);
}
else { // rank greater than 1
std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(sortedVals.getShapeInfo(), lastDims);
auto pTadShape = packX.specialShapeInfo();
auto pTadShapeH = packX.primaryShapeInfo();
auto pTadOffsets = packX.specialOffsets();
// auto pLastDimData = (int*) manager.replicatePointer(lastDims.data(), lastDims.size() * sizeof(int));
sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse);
// manager.synchronize();
sortedVals.tickWriteDevice();
sortedVals.syncToHost();
auto stream = context->getCudaStream();
fillUpElementKernel<T><<<32, 64, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n);
}
NDArray::registerSpecialUse({output}, {input});
}
void nthElementFunctor(nd4j::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) {
BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES);
}
}
}
}
|
b7f0b3430ffc57e0704e61c2f32b97e5fe21f6a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mish_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__global__ void KeMishFw(const T* in, T* out, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
T x = in[tid];
T sp = CalcSoftplus<T>(x, threshold);
out[tid] = x * tanh(sp);
}
}
// expf instead of exp should be used for the float type, so the float kernel is
// implemented and registered separately
__global__ void KeMishFwFP32(const float* in, float* out, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
float x = in[tid];
float sp = CalcSoftplusFP32(x, threshold);
out[tid] = x * tanhf(sp);
}
}
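// Backward pass: mish(x) = x * tanh(sp) with sp = softplus(x), so
// d mish/dx = tanh(sp) + x * (1 - tanh(sp)^2) * sp'(x), where sp'(x) = 1 - exp(-sp)
// (equal to sigmoid(x)); grad_sp below computes sp' via expm1 for numerical accuracy.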
template <typename T>
__global__ void KeMishBw(const T* in, const T* dout, T* din, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
T x = in[tid];
T sp = CalcSoftplus<T>(x, threshold);
T tsp = tanh(sp);
T grad_sp = -expm1(-sp);
T grad_tsp = (static_cast<T>(1) - tsp * tsp) * grad_sp;
din[tid] = dout[tid] * (x * grad_tsp + tsp);
}
}
__global__ void KeMishBwFP32(const float* in, const float* dout, float* din,
const int numel, const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
float x = in[tid];
float sp = CalcSoftplusFP32(x, threshold);
float tsp = tanhf(sp);
float grad_sp = -expm1f(-sp);
float grad_tsp = (static_cast<float>(1) - tsp * tsp) * grad_sp;
din[tid] = dout[tid] * (x * grad_tsp + tsp);
}
}
template <typename DeviceContext, typename T>
class MishCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
const float threshold = ctx.Attr<float>("threshold");
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
hipLaunchKernelGGL(( KeMishFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0,
ctx.cuda_device_context().stream(), x_data, out_data, numel,
threshold);
}
};
template <typename DeviceContext>
class MishFP32CUDAKernel : public framework::OpKernel<float> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
const float threshold = ctx.Attr<float>("threshold");
const float* x_data = x->data<float>();
float* out_data = out->mutable_data<float>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
hipLaunchKernelGGL(( KeMishFwFP32), dim3(config.block_per_grid), dim3(config.thread_per_block), 0,
ctx.cuda_device_context().stream(), x_data, out_data,
numel, threshold);
}
};
template <typename DeviceContext, typename T>
class MishGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto threshold = ctx.Attr<float>("threshold");
const T* x_data = x->data<T>();
const T* dout_data = dout->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
hipLaunchKernelGGL(( KeMishBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0,
ctx.cuda_device_context().stream(),
x_data, dout_data, dx_data, numel, threshold);
}
};
template <typename DeviceContext>
class MishGradFP32CUDAKernel : public framework::OpKernel<float> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto threshold = ctx.Attr<float>("threshold");
const float* x_data = x->data<float>();
const float* dout_data = dout->data<float>();
float* dx_data = dx->mutable_data<float>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
hipLaunchKernelGGL(( KeMishBwFP32), dim3(config.block_per_grid), dim3(config.thread_per_block), 0,
ctx.cuda_device_context().stream(),
x_data, dout_data, dx_data, numel, threshold);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
mish, ops::MishFP32CUDAKernel<paddle::platform::CUDADeviceContext>,
ops::MishCUDAKernel<paddle::platform::CUDADeviceContext, double>)
REGISTER_OP_CUDA_KERNEL(
mish_grad, ops::MishGradFP32CUDAKernel<paddle::platform::CUDADeviceContext>,
ops::MishGradCUDAKernel<paddle::platform::CUDADeviceContext, double>)
| b7f0b3430ffc57e0704e61c2f32b97e5fe21f6a2.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mish_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
__global__ void KeMishFw(const T* in, T* out, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
T x = in[tid];
T sp = CalcSoftplus<T>(x, threshold);
out[tid] = x * tanh(sp);
}
}
// expf instead of exp should be used for the float type, so the float kernel is
// implemented and registered separately
__global__ void KeMishFwFP32(const float* in, float* out, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
float x = in[tid];
float sp = CalcSoftplusFP32(x, threshold);
out[tid] = x * tanhf(sp);
}
}
template <typename T>
__global__ void KeMishBw(const T* in, const T* dout, T* din, const int numel,
const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
T x = in[tid];
T sp = CalcSoftplus<T>(x, threshold);
T tsp = tanh(sp);
T grad_sp = -expm1(-sp);
T grad_tsp = (static_cast<T>(1) - tsp * tsp) * grad_sp;
din[tid] = dout[tid] * (x * grad_tsp + tsp);
}
}
__global__ void KeMishBwFP32(const float* in, const float* dout, float* din,
const int numel, const float threshold) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < numel; tid += stride) {
float x = in[tid];
float sp = CalcSoftplusFP32(x, threshold);
float tsp = tanhf(sp);
float grad_sp = -expm1f(-sp);
float grad_tsp = (static_cast<float>(1) - tsp * tsp) * grad_sp;
din[tid] = dout[tid] * (x * grad_tsp + tsp);
}
}
template <typename DeviceContext, typename T>
class MishCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
const float threshold = ctx.Attr<float>("threshold");
const T* x_data = x->data<T>();
T* out_data = out->mutable_data<T>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
KeMishFw<T><<<config.block_per_grid, config.thread_per_block, 0,
ctx.cuda_device_context().stream()>>>(x_data, out_data, numel,
threshold);
}
};
template <typename DeviceContext>
class MishFP32CUDAKernel : public framework::OpKernel<float> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
const float threshold = ctx.Attr<float>("threshold");
const float* x_data = x->data<float>();
float* out_data = out->mutable_data<float>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
KeMishFwFP32<<<config.block_per_grid, config.thread_per_block, 0,
ctx.cuda_device_context().stream()>>>(x_data, out_data,
numel, threshold);
}
};
template <typename DeviceContext, typename T>
class MishGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto threshold = ctx.Attr<float>("threshold");
const T* x_data = x->data<T>();
const T* dout_data = dout->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
KeMishBw<T><<<config.block_per_grid, config.thread_per_block, 0,
ctx.cuda_device_context().stream()>>>(
x_data, dout_data, dx_data, numel, threshold);
}
};
template <typename DeviceContext>
class MishGradFP32CUDAKernel : public framework::OpKernel<float> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto threshold = ctx.Attr<float>("threshold");
const float* x_data = x->data<float>();
const float* dout_data = dout->data<float>();
float* dx_data = dx->mutable_data<float>(ctx.GetPlace());
const int numel = x->numel();
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), numel);
KeMishBwFP32<<<config.block_per_grid, config.thread_per_block, 0,
ctx.cuda_device_context().stream()>>>(
x_data, dout_data, dx_data, numel, threshold);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
mish, ops::MishFP32CUDAKernel<paddle::platform::CUDADeviceContext>,
ops::MishCUDAKernel<paddle::platform::CUDADeviceContext, double>)
REGISTER_OP_CUDA_KERNEL(
mish_grad, ops::MishGradFP32CUDAKernel<paddle::platform::CUDADeviceContext>,
ops::MishGradCUDAKernel<paddle::platform::CUDADeviceContext, double>)
|
1c9a9f2a17abe4b58a55f628e9c724a6518d5b8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "lbm.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
// constants
//! 0 = no periodic boundaries, 1 = periodic boundaries -- non-periodic is faster
static const int PeriodicBoundaries = 0;
//! dimensionality of the domain, has to be 3
static const int D = 3;
//! size of neighbourhood of a cell, has to be 19
static const int Q = 19;
//! cell types
enum CellFlags {
//! a wet cell
CellFluid = 0,
//! a wall cell, flow bounces back
CellNoSlip,
//! fixed velocity cell
CellVelocity
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// we can save constants on the GPU in an extra space with a lot faster access
__constant__ float d_w[Q];
__constant__ int d_e[Q][D];
__constant__ int d_invDir[Q];
__constant__ float d_omega;
size_t __device__ index(int i, int j, int k) {
if(PeriodicBoundaries) {
i = (i+blockDim.x)%blockDim.x;
j = (j+gridDim.x)%gridDim.x;
k = (k+gridDim.y)%gridDim.y;
}
return i + blockDim.x*(j + gridDim.x*size_t(k));
}
size_t __device__ index(int i, int j, int k, int l) {
if(PeriodicBoundaries) {
i = (i+blockDim.x)%blockDim.x;
j = (j+gridDim.x)%gridDim.x;
k = (k+gridDim.y)%gridDim.y;
}
#ifdef INNER_INDEX_DISTRIBUTION
return l + Q*(i + blockDim.x*(j + gridDim.x*size_t(k)));
#else
return i + blockDim.x*(j + gridDim.x*(size_t(k) + gridDim.y*l));
#endif
}
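// Thread mapping used by the kernels below: they are launched with a dim3(height, depth)
// grid of dim3(width) blocks, so threadIdx.x is the x index, blockIdx.x the y index and
// blockIdx.y the z index of a cell (see the launch calls in LBMD3Q19 further down).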
__global__ void collideCudaKernel(float *d_cellsCur, char *d_flags, float3 *d_velocity) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// in case we have no periodic boundaries, the threads on the edges don't have anything to do
if (!PeriodicBoundaries) {
if (i==0 || i==blockDim.x-1 || j==0 || j==gridDim.x-1 || k==0 || k==gridDim.y-1)
return;
}
// nothing to do for NoSlip cells
const int flag = d_flags[index(i,j,k)];
if (flag == CellNoSlip)
return;
// compute density and velocity in cell
float density = 0.f;
float3 u = make_float3(0.f, 0.f, 0.f);
for(int l=0; l<Q; ++l) {
const float weight = d_cellsCur[index(i,j,k,l)];
density += weight;
u.x += d_e[l][0] * weight;
u.y += d_e[l][1] * weight;
u.z += d_e[l][2] * weight;
}
// override velocity for Velocity cells
if (flag == CellVelocity) {
u = d_velocity[index(i,j,k)];
}
// collision
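// BGK relaxation towards the equilibrium distribution
// feq_l = w_l * (rho - 1.5*|u|^2 + 3*(e_l . u) + 4.5*(e_l . u)^2),
// blended with the previous value via the relaxation parameter omega.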
for(int l=0; l<Q; ++l) {
float dot = 0.f;
float uu = 0.f;
dot += d_e[l][0] * u.x;
uu += u.x * u.x;
dot += d_e[l][1] * u.y;
uu += u.y * u.y;
dot += d_e[l][2] * u.z;
uu += u.z * u.z;
float feq = d_w[l] * (density - 1.5f*uu + 3.f*dot + 4.5f*dot*dot);
d_cellsCur[index(i,j,k,l)] =
d_omega * feq + (1.0f-d_omega) * d_cellsCur[index(i,j,k,l)];
}
}
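// Streaming step (pull scheme): each cell reads distribution l from its neighbour in the
// inverse direction; if that neighbour is a NoSlip wall, the local distribution in the
// inverse direction is reflected back instead (simple bounce-back boundary).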
__global__ void streamCudaKernel(float *d_cellsCur, float *d_cellsLast, char *d_flags) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// in case we have no periodic boundaries, the threads on the edges don't have anything to do
if (!PeriodicBoundaries) {
if (i==0 || i==blockDim.x-1 || j==0 || j==gridDim.x-1 || k==0 || k==gridDim.y-1)
return;
}
for(int l=0; l<Q; ++l) {
const int inv = d_invDir[l];
const int si = i+d_e[inv][0];
const int sj = j+d_e[inv][1];
const int sk = k+d_e[inv][2];
if(d_flags[index(si,sj,sk)] == CellNoSlip) {
// reflect at NoSlip cell
d_cellsCur[index(i,j,k,l)] = d_cellsLast[index(i,j,k,inv)];
}
else {
// update from neighbours
d_cellsCur[index(i,j,k,l)] = d_cellsLast[index(si,sj,sk,l)];
}
}
}
__global__ void analyzeCudaKernel(float *d_cellsCur, char *d_flags, float *d_density, float3 *d_u, float3 *d_velocity) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// compute density and velocity in cell
float density = 0.f;
float3 u = make_float3(0.f, 0.f, 0.f);
if(d_flags[index(i,j,k)] == CellNoSlip) {
density = 1.f;
}
else {
for(int l=0; l<Q; ++l) {
const float weight = d_cellsCur[index(i,j,k,l)];
density += weight;
u.x += d_e[l][0] * weight;
u.y += d_e[l][1] * weight;
u.z += d_e[l][2] * weight;
}
}
d_density[index(i,j,k)] = density;
d_u[index(i,j,k)] = u;
}
__global__ void minMaxCudaKernel() { }
//! we need some kind of initialization of our device
void LBMD3Q19::initializeCuda() {
// get some space for our arrays
gpuErrchk (hipMalloc((void **) &d_flags, sizeof(char) * m_width * m_height * m_depth));
gpuErrchk (hipMalloc((void **) &d_velocity, sizeof(float3) * m_width * m_height * m_depth));
gpuErrchk (hipMalloc((void **) &d_u, sizeof(float3) * m_width * m_height * m_depth));
gpuErrchk (hipMalloc((void **) &d_density, sizeof(float) * m_width * m_height * m_depth));
gpuErrchk (hipMalloc((void **) &d_cells[0], sizeof(float) * m_width * m_height * m_depth * Q));
gpuErrchk (hipMalloc((void **) &d_cells[1], sizeof(float) * m_width * m_height * m_depth * Q));
// use cpyToSymbol for known sizes (LEGACY CODE - WORKS ONLY WITH CUDA <= 5.5)
gpuErrchk (hipMemcpyToSymbol(d_w, w.w, sizeof(float)*Q));
gpuErrchk (hipMemcpyToSymbol(d_invDir, invDir, sizeof(int)*Q));
for (int i=0; i<Q; i++)
gpuErrchk (hipMemcpyToSymbol(d_e, e[i].e, sizeof(int)*D, sizeof(int) * i * D, hipMemcpyHostToDevice));
}
//! collide implementation with CUDA
void LBMD3Q19::collideCuda() {
hipLaunchKernelGGL(( collideCudaKernel), dim3(dim3(m_height, m_depth)),dim3(dim3(m_width)), 0, 0, d_cells[m_current], d_flags, d_velocity);
}
//! streaming with CUDA
void LBMD3Q19::streamCuda() {
hipLaunchKernelGGL(( streamCudaKernel), dim3(dim3(m_height, m_depth)),dim3(dim3(m_width)), 0, 0, d_cells[m_current], d_cells[!m_current], d_flags);
}
//! compute densities and velocities with CUDA
void LBMD3Q19::analyzeCuda() {
hipLaunchKernelGGL(( analyzeCudaKernel), dim3(dim3(m_height, m_depth)),dim3(dim3(m_width)), 0, 0, d_cells[m_current], d_flags, d_density, d_u, d_velocity);
// we need to copy back the analyzed data to the host
gpuErrchk (hipMemcpy(m_u, d_u, sizeof(float3) * m_width * m_height * m_depth, hipMemcpyDeviceToHost));
gpuErrchk (hipMemcpy(m_density, d_density, sizeof(float) * m_width * m_height * m_depth, hipMemcpyDeviceToHost));
}
//! compute minimum and maximum density and velocity with CUDA
void LBMD3Q19::minMaxCuda() {
hipLaunchKernelGGL(( minMaxCudaKernel), dim3(dim3(m_height, m_depth)),dim3(dim3(m_width)), 0, 0, );
}
//! very dumb function that copies cells back to host
void LBMD3Q19::cpCellsDeviceToHost() {
gpuErrchk (hipMemcpy(m_cells[m_current], d_cells[m_current], sizeof(float) * m_width * m_height * m_depth * Q, hipMemcpyDeviceToHost));
gpuErrchk (hipMemcpy(m_cells[!m_current], d_cells[!m_current], sizeof(float) * m_width * m_height * m_depth * Q, hipMemcpyDeviceToHost));
}
//! free allocated data on device
void LBMD3Q19::freeCuda() {
//! each malloc needs a free
gpuErrchk (hipFree(d_flags));
gpuErrchk (hipFree(d_velocity));
gpuErrchk (hipFree(d_u));
gpuErrchk (hipFree(d_density));
gpuErrchk (hipFree(d_cells[0]));
gpuErrchk (hipFree(d_cells[1]));
}
//! this needs to be done, each time we switch our settings
void LBMD3Q19::applyCuda() {
//! copy data from host to device, the rest are constants which stay the same
gpuErrchk (hipMemcpy(d_flags, m_flags, sizeof(char) * m_width * m_height * m_depth, hipMemcpyHostToDevice));
gpuErrchk (hipMemcpy(d_velocity, m_velocity, sizeof(float) * m_width * m_height * m_depth * D, hipMemcpyHostToDevice));
gpuErrchk (hipMemcpy(d_cells[m_current], m_cells[m_current], sizeof(float) * m_width * m_height * m_depth * Q, hipMemcpyHostToDevice));
gpuErrchk (hipMemcpy(d_cells[!m_current], m_cells[!m_current], sizeof(float) * m_width * m_height * m_depth * Q, hipMemcpyHostToDevice));
//! omega can be changed, too
gpuErrchk (hipMemcpyToSymbol(d_omega, &m_omega, sizeof(float)));
}
//! http://www.cs.cmu.edu/afs/cs/academic/class/15668-s11/www/cuda-doc/html/group__CUDART__THREAD_g6e0c5163e6f959b56b6ae2eaa8483576.html
void LBMD3Q19::syncCuda() {
gpuErrchk (hipDeviceSynchronize());
}
| 1c9a9f2a17abe4b58a55f628e9c724a6518d5b8e.cu | #include "lbm.h"
#include <cuda.h>
#include <stdio.h>
// constants
//! 0 = no periodic boundaries, 1 = periodic boundaries -- non-periodic is faster
static const int PeriodicBoundaries = 0;
//! dimensionality of the domain, has to be 3
static const int D = 3;
//! size of neighbourhood of a cell, has to be 19
static const int Q = 19;
//! cell types
enum CellFlags {
//! a wet cell
CellFluid = 0,
//! a wall cell, flow bounces back
CellNoSlip,
//! fixed velocity cell
CellVelocity
};
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// we can save constants on the GPU in an extra space with a lot faster access
__constant__ float d_w[Q];
__constant__ int d_e[Q][D];
__constant__ int d_invDir[Q];
__constant__ float d_omega;
size_t __device__ index(int i, int j, int k) {
if(PeriodicBoundaries) {
i = (i+blockDim.x)%blockDim.x;
j = (j+gridDim.x)%gridDim.x;
k = (k+gridDim.y)%gridDim.y;
}
return i + blockDim.x*(j + gridDim.x*size_t(k));
}
size_t __device__ index(int i, int j, int k, int l) {
if(PeriodicBoundaries) {
i = (i+blockDim.x)%blockDim.x;
j = (j+gridDim.x)%gridDim.x;
k = (k+gridDim.y)%gridDim.y;
}
#ifdef INNER_INDEX_DISTRIBUTION
return l + Q*(i + blockDim.x*(j + gridDim.x*size_t(k)));
#else
return i + blockDim.x*(j + gridDim.x*(size_t(k) + gridDim.y*l));
#endif
}
__global__ void collideCudaKernel(float *d_cellsCur, char *d_flags, float3 *d_velocity) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// in case we have no periodic boundaries, the threads on the edges don't have anything to do
if (!PeriodicBoundaries) {
if (i==0 || i==blockDim.x-1 || j==0 || j==gridDim.x-1 || k==0 || k==gridDim.y-1)
return;
}
// nothing to do for NoSlip cells
const int flag = d_flags[index(i,j,k)];
if (flag == CellNoSlip)
return;
// compute density and velocity in cell
float density = 0.f;
float3 u = make_float3(0.f, 0.f, 0.f);
for(int l=0; l<Q; ++l) {
const float weight = d_cellsCur[index(i,j,k,l)];
density += weight;
u.x += d_e[l][0] * weight;
u.y += d_e[l][1] * weight;
u.z += d_e[l][2] * weight;
}
// override velocity for Velocity cells
if (flag == CellVelocity) {
u = d_velocity[index(i,j,k)];
}
// collision
for(int l=0; l<Q; ++l) {
float dot = 0.f;
float uu = 0.f;
dot += d_e[l][0] * u.x;
uu += u.x * u.x;
dot += d_e[l][1] * u.y;
uu += u.y * u.y;
dot += d_e[l][2] * u.z;
uu += u.z * u.z;
float feq = d_w[l] * (density - 1.5f*uu + 3.f*dot + 4.5f*dot*dot);
d_cellsCur[index(i,j,k,l)] =
d_omega * feq + (1.0f-d_omega) * d_cellsCur[index(i,j,k,l)];
}
}
__global__ void streamCudaKernel(float *d_cellsCur, float *d_cellsLast, char *d_flags) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// in case we have no periodic boundaries, the threads on the edges don't have anything to do
if (!PeriodicBoundaries) {
if (i==0 || i==blockDim.x-1 || j==0 || j==gridDim.x-1 || k==0 || k==gridDim.y-1)
return;
}
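    // pull-style streaming: gather from the neighbour lying in the inverse direction;
    // NoSlip neighbours bounce the cell's own previous distribution back instead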
for(int l=0; l<Q; ++l) {
const int inv = d_invDir[l];
const int si = i+d_e[inv][0];
const int sj = j+d_e[inv][1];
const int sk = k+d_e[inv][2];
if(d_flags[index(si,sj,sk)] == CellNoSlip) {
// reflect at NoSlip cell
d_cellsCur[index(i,j,k,l)] = d_cellsLast[index(i,j,k,inv)];
}
else {
// update from neighbours
d_cellsCur[index(i,j,k,l)] = d_cellsLast[index(si,sj,sk,l)];
}
}
}
__global__ void analyzeCudaKernel(float *d_cellsCur, char *d_flags, float *d_density, float3 *d_u, float3 *d_velocity) {
// get the current thread position
int i = threadIdx.x;
int j = blockIdx.x;
int k = blockIdx.y;
// compute density and velocity in cell
float density = 0.f;
float3 u = make_float3(0.f, 0.f, 0.f);
if(d_flags[index(i,j,k)] == CellNoSlip) {
density = 1.f;
}
else {
for(int l=0; l<Q; ++l) {
const float weight = d_cellsCur[index(i,j,k,l)];
density += weight;
u.x += d_e[l][0] * weight;
u.y += d_e[l][1] * weight;
u.z += d_e[l][2] * weight;
}
}
d_density[index(i,j,k)] = density;
d_u[index(i,j,k)] = u;
}
__global__ void minMaxCudaKernel() { }
//! we need some kind of initialization of our device
void LBMD3Q19::initializeCuda() {
// get some space for our arrays
gpuErrchk (cudaMalloc((void **) &d_flags, sizeof(char) * m_width * m_height * m_depth));
gpuErrchk (cudaMalloc((void **) &d_velocity, sizeof(float3) * m_width * m_height * m_depth));
gpuErrchk (cudaMalloc((void **) &d_u, sizeof(float3) * m_width * m_height * m_depth));
gpuErrchk (cudaMalloc((void **) &d_density, sizeof(float) * m_width * m_height * m_depth));
gpuErrchk (cudaMalloc((void **) &d_cells[0], sizeof(float) * m_width * m_height * m_depth * Q));
gpuErrchk (cudaMalloc((void **) &d_cells[1], sizeof(float) * m_width * m_height * m_depth * Q));
// use cpyToSymbol for known sizes (LEGACY CODE - WORKS ONLY WITH CUDA <= 5.5)
gpuErrchk (cudaMemcpyToSymbol(d_w, w.w, sizeof(float)*Q));
gpuErrchk (cudaMemcpyToSymbol(d_invDir, invDir, sizeof(int)*Q));
for (int i=0; i<Q; i++)
gpuErrchk (cudaMemcpyToSymbol(d_e, e[i].e, sizeof(int)*D, sizeof(int) * i * D, cudaMemcpyHostToDevice));
}
//! collide implementation with CUDA
void LBMD3Q19::collideCuda() {
collideCudaKernel<<<dim3(m_height, m_depth),dim3(m_width)>>>(d_cells[m_current], d_flags, d_velocity);
}
//! streaming with CUDA
void LBMD3Q19::streamCuda() {
streamCudaKernel<<<dim3(m_height, m_depth),dim3(m_width)>>>(d_cells[m_current], d_cells[!m_current], d_flags);
}
//! compute densities and velocities with CUDA
void LBMD3Q19::analyzeCuda() {
analyzeCudaKernel<<<dim3(m_height, m_depth),dim3(m_width)>>>(d_cells[m_current], d_flags, d_density, d_u, d_velocity);
// we need to copy back the analyzed data to the host
gpuErrchk (cudaMemcpy(m_u, d_u, sizeof(float3) * m_width * m_height * m_depth, cudaMemcpyDeviceToHost));
gpuErrchk (cudaMemcpy(m_density, d_density, sizeof(float) * m_width * m_height * m_depth, cudaMemcpyDeviceToHost));
}
//! compute minimum and maximum density and velocity with CUDA (the kernel is currently an empty stub)
void LBMD3Q19::minMaxCuda() {
minMaxCudaKernel<<<dim3(m_height, m_depth),dim3(m_width)>>>();
}
//! very dumb function that copies cells back to host
void LBMD3Q19::cpCellsDeviceToHost() {
gpuErrchk (cudaMemcpy(m_cells[m_current], d_cells[m_current], sizeof(float) * m_width * m_height * m_depth * Q, cudaMemcpyDeviceToHost));
gpuErrchk (cudaMemcpy(m_cells[!m_current], d_cells[!m_current], sizeof(float) * m_width * m_height * m_depth * Q, cudaMemcpyDeviceToHost));
}
//! free allocated data on device
void LBMD3Q19::freeCuda() {
//! each malloc needs a free
gpuErrchk (cudaFree(d_flags));
gpuErrchk (cudaFree(d_velocity));
gpuErrchk (cudaFree(d_u));
gpuErrchk (cudaFree(d_density));
gpuErrchk (cudaFree(d_cells[0]));
gpuErrchk (cudaFree(d_cells[1]));
}
//! this needs to be done, each time we switch our settings
void LBMD3Q19::applyCuda() {
//! copy data from host to device, the rest are constants which stay the same
gpuErrchk (cudaMemcpy(d_flags, m_flags, sizeof(char) * m_width * m_height * m_depth, cudaMemcpyHostToDevice));
gpuErrchk (cudaMemcpy(d_velocity, m_velocity, sizeof(float) * m_width * m_height * m_depth * D, cudaMemcpyHostToDevice));
gpuErrchk (cudaMemcpy(d_cells[m_current], m_cells[m_current], sizeof(float) * m_width * m_height * m_depth * Q, cudaMemcpyHostToDevice));
gpuErrchk (cudaMemcpy(d_cells[!m_current], m_cells[!m_current], sizeof(float) * m_width * m_height * m_depth * Q, cudaMemcpyHostToDevice));
//! omega can be changed, too
gpuErrchk (cudaMemcpyToSymbol(d_omega, &m_omega, sizeof(float)));
}
//! http://www.cs.cmu.edu/afs/cs/academic/class/15668-s11/www/cuda-doc/html/group__CUDART__THREAD_g6e0c5163e6f959b56b6ae2eaa8483576.html
void LBMD3Q19::syncCuda() {
gpuErrchk (cudaThreadSynchronize());
}
|
ba9ea1c014a84889e08382ec8cb75e0279f30d85.hip | // !!! This is a file automatically generated by hipify!!!
#include "gc_sum.h"
#include <assert.h>
#include <hip/hip_runtime.h>
#include <iostream>
#define xcuda(stmt) { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
std::cerr << __FILE__ << ":" << __LINE__ << ": Failed to run " << #stmt << ". Reason: " << hipGetErrorString(err) << std::endl; \
exit(1); \
} \
}
gc_sum_t create_gc_sum(double *gc_win_sum, uint64_t nwins, bool gpu) {
gc_sum_t result;
result.cumsum = gc_win_sum + 1;
result.begin = 0;
result.end = nwins;
result.sum_begin = 0;
result.sum_end = gc_win_sum[nwins];
if(gpu) {
result.gpu_alloc();
} else {
result.gpu_cumsum = NULL;
}
return result;
}
void dispose_gc_sum(gc_sum_t gc_sum) {
delete [] (gc_sum.cumsum - 1);
if(gc_sum.gpu_cumsum) {
gc_sum.gpu_dispose();
}
}
void gc_sum_t::gpu_alloc() {
size_t sizeof_ = sizeof(double) * (end - begin + 1);
xcuda( hipMalloc((void**)&gpu_cumsum, sizeof_) );
xcuda( hipMemcpy(gpu_cumsum, cumsum - 1, sizeof_, hipMemcpyHostToDevice) );
}
void gc_sum_t::gpu_dispose() {
xcuda( hipFree(gpu_cumsum) );
}
gc_sum_t gc_sum_t::gpu_copy() {
gc_sum_t result = *this;
result.cumsum = gpu_cumsum + 1;
return result;
}
__device__ __host__ void gc_sum_t::split(uint64_t midpoint, gc_sum_t &left, gc_sum_t &right) const {
left.cumsum = this->cumsum;
left.gpu_cumsum = this->gpu_cumsum;
left.begin = this->begin;
left.end = this->begin + midpoint;
left.sum_begin = this->sum_begin;
left.sum_end = left.cumsum[left.end - 1];
right.cumsum = this->cumsum;
right.gpu_cumsum = this->gpu_cumsum;
right.begin = left.end;
right.end = this->end;
right.sum_begin = left.sum_begin + left.get(midpoint - 1);
right.sum_end = right.cumsum[right.end - 1];
}
double gc_sum_t::range(uint64_t begin, uint64_t end) const {
return cumsum[this->begin + end - 1] - cumsum[this->begin + begin - 1];
}
| ba9ea1c014a84889e08382ec8cb75e0279f30d85.cu | #include "gc_sum.h"
#include <assert.h>
#include <cuda.h>
#include <iostream>
#define xcuda(stmt) { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
std::cerr << __FILE__ << ":" << __LINE__ << ": Failed to run " << #stmt << ". Reason: " << cudaGetErrorString(err) << std::endl; \
exit(1); \
} \
}
gc_sum_t create_gc_sum(double *gc_win_sum, uint64_t nwins, bool gpu) {
gc_sum_t result;
result.cumsum = gc_win_sum + 1;
result.begin = 0;
result.end = nwins;
result.sum_begin = 0;
result.sum_end = gc_win_sum[nwins];
if(gpu) {
result.gpu_alloc();
} else {
result.gpu_cumsum = NULL;
}
return result;
}
void dispose_gc_sum(gc_sum_t gc_sum) {
delete [] (gc_sum.cumsum - 1);
if(gc_sum.gpu_cumsum) {
gc_sum.gpu_dispose();
}
}
void gc_sum_t::gpu_alloc() {
size_t sizeof_ = sizeof(double) * (end - begin + 1);
xcuda( cudaMalloc((void**)&gpu_cumsum, sizeof_) );
xcuda( cudaMemcpy(gpu_cumsum, cumsum - 1, sizeof_, cudaMemcpyHostToDevice) );
}
void gc_sum_t::gpu_dispose() {
xcuda( cudaFree(gpu_cumsum) );
}
gc_sum_t gc_sum_t::gpu_copy() {
gc_sum_t result = *this;
result.cumsum = gpu_cumsum + 1;
return result;
}
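// Split the window range [begin, end) at `midpoint`: both halves keep referencing the same
// cumulative-sum array, so only the range bounds and boundary sums are updated (no data is copied).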
__device__ __host__ void gc_sum_t::split(uint64_t midpoint, gc_sum_t &left, gc_sum_t &right) const {
left.cumsum = this->cumsum;
left.gpu_cumsum = this->gpu_cumsum;
left.begin = this->begin;
left.end = this->begin + midpoint;
left.sum_begin = this->sum_begin;
left.sum_end = left.cumsum[left.end - 1];
right.cumsum = this->cumsum;
right.gpu_cumsum = this->gpu_cumsum;
right.begin = left.end;
right.end = this->end;
right.sum_begin = left.sum_begin + left.get(midpoint - 1);
right.sum_end = right.cumsum[right.end - 1];
}
double gc_sum_t::range(uint64_t begin, uint64_t end) const {
return cumsum[this->begin + end - 1] - cumsum[this->begin + begin - 1];
}
|
bc7a0bb21f1e04904b38f639eb8406aed9ce00bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
#include "box_iou_rotated_cuda.cuh"
#include "parrots_cuda_helper.hpp"
void box_iou_rotated_cuda_launcher(const DArrayLite boxes1,
const DArrayLite boxes2, DArrayLite ious,
const int mode_flag, const bool aligned,
hipStream_t stream) {
using scalar_t = float;
int output_size = ious.size();
int num_boxes1 = boxes1.dim(0);
int num_boxes2 = boxes2.dim(0);
hipLaunchKernelGGL(( box_iou_rotated_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
num_boxes1, num_boxes2, boxes1.ptr<scalar_t>(),
boxes2.ptr<scalar_t>(), (scalar_t*)ious.ptr<scalar_t>(), mode_flag,
aligned);
PARROTS_CUDA_CHECK(hipGetLastError());
}
| bc7a0bb21f1e04904b38f639eb8406aed9ce00bc.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
// modified from
// https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
#include "box_iou_rotated_cuda.cuh"
#include "parrots_cuda_helper.hpp"
void box_iou_rotated_cuda_launcher(const DArrayLite boxes1,
const DArrayLite boxes2, DArrayLite ious,
const int mode_flag, const bool aligned,
cudaStream_t stream) {
using scalar_t = float;
int output_size = ious.size();
int num_boxes1 = boxes1.dim(0);
int num_boxes2 = boxes2.dim(0);
box_iou_rotated_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
num_boxes1, num_boxes2, boxes1.ptr<scalar_t>(),
boxes2.ptr<scalar_t>(), (scalar_t*)ious.ptr<scalar_t>(), mode_flag,
aligned);
PARROTS_CUDA_CHECK(cudaGetLastError());
}
|
fc6c4cef226f112e6776c76796c6eedec9caa67f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define MAX_NUM_OF_ROWS 1024
#define MAX_NUM_OF_COLS 1024
float Matrix_1[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
float Matrix_2[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
float Matrix_output[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
int no_of_rows_1 = 4;
int no_of_rows_2 = 4;
int no_of_cols_1 = 4;
int no_of_cols_2 = 4;
void ReadMatrix_1_2(void);
hipError_t MultiplyWithCuda(void);
__global__ void MultiplyKernel(float *c, const float *a, const float *b, const int wc,const int hc, const int CommonDim)
{
	//each thread computes one output element: the dot product of one row of A with one
	//column of B, accumulated into a single value stored in C. Note: the indexing uses
	//CommonDim as the row stride of B and C, which only lines up when the common dimension
	//equals the output width (as with the square matrices used in this example)
float sum_tmp = 0;
	// compute the global row and column index: the thread index is offset by
	// blockIdx * blockDim so that each block covers its own tile of the output
int Row = blockIdx.x * blockDim.x + threadIdx.x;
int Col = blockIdx.y * blockDim.y + threadIdx.y;
if ((Row < wc) && (Col < hc)) {
for (int k = 0; k < CommonDim; k++)
{
sum_tmp += a[Row*CommonDim + k] * b[k * CommonDim + Col];
}
c[Row*CommonDim + Col] = sum_tmp;
}
}
int main()
{
// read 2 Matrix from Files
ReadMatrix_1_2();
// take time snap before multiplication
high_resolution_clock::time_point t1 = high_resolution_clock::now();
	//GPU multiplication Matrix_1 * Matrix_2
hipError_t cudaStatus = MultiplyWithCuda();
// take time snap after multiplication
high_resolution_clock::time_point t2 = high_resolution_clock::now();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// print the Time taken to Multiply two Matrices
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << "Multiplication Time CPU(us):" << duration << "\n";
//print the output matrix for testing
// for (int i = 0; i < no_of_cols_1; i++) {
// for (int j = 0; j < no_of_rows_2; j++)
// {
// cout << Matrix_output[i][j] << " ";
// }
// cout << "\n";
// }
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// this function reads the two matrices from two files with the dimensions given in the global variables set at the beginning of the file
void ReadMatrix_1_2(void) {
int x, y;
ifstream in_1("Matrix_1.txt", std::ifstream::in);
ifstream in_2("Matrix_2.txt", std::ifstream::in);
if (!in_1 || !in_2) {
cout << "Error! Cannot open file.\n";
return;
}
else if (no_of_cols_1 != no_of_rows_2) {
cout << "Error! Matrix dimensions is not valid for multiplication.\n";
return;
}
for (y = 0; y < no_of_cols_1; y++) {
for (x = 0; x < no_of_rows_1; x++) {
in_1 >> Matrix_1[x][y];
}
}
for (y = 0; y < no_of_cols_2; y++) {
for (x = 0; x < no_of_rows_2; x++) {
in_2 >> Matrix_2[x][y];
}
}
in_1.close();
in_2.close();
}
// Helper function that uses CUDA to multiply the two matrices in parallel.
hipError_t MultiplyWithCuda()
{
float *dev_a = 0; //Matrix_1
float *dev_b = 0; //Matrix_2
float *dev_c = 0; //Matrix_output
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
	// Allocate GPU buffers for the three matrices (two inputs, one output).
cudaStatus = hipMalloc((void**)&dev_c, no_of_rows_1*no_of_cols_2* sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, no_of_rows_1*no_of_cols_1 * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, no_of_rows_2*no_of_cols_2 * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
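	// Note: the host arrays are fixed 1024x1024, so element [x][y] lives at offset x*1024 + y.
	// These copies treat them as densely packed no_of_rows x no_of_cols blocks instead, which
	// only matches when the matrices are 1024 wide; for the small 4x4 example only the first
	// row of real data reaches the device, and the result copied back below has the same
	// layout mismatch.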
cudaStatus = hipMemcpy(dev_a, Matrix_1, no_of_rows_1*no_of_cols_1 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, Matrix_2, no_of_rows_2*no_of_cols_2 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
	// number of threads to run in parallel: no_of_rows_1 * no_of_cols_2
int no_of_parrallel_threads = no_of_rows_1 * no_of_cols_2;
	// thread and block definition without shared memory
dim3 threasPerBlock(no_of_parrallel_threads, no_of_parrallel_threads);
dim3 blocksPerGrid(1, 1);
	if (no_of_parrallel_threads >= 32) // cap the block at 32x32 = 1024 threads (the per-block hardware limit)
{
threasPerBlock.x = 32;
threasPerBlock.y = 32;
blocksPerGrid.x = ceil(double(no_of_rows_1)/ threasPerBlock.x);
blocksPerGrid.y = ceil(double(no_of_cols_2)/threasPerBlock.y);
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( MultiplyKernel) , dim3(blocksPerGrid), dim3(threasPerBlock) , 0, 0, dev_c, dev_a, dev_b, no_of_rows_1 ,no_of_cols_2, no_of_cols_1);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
int output_width = no_of_parrallel_threads;
cudaStatus = hipMemcpy(Matrix_output, dev_c, output_width * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| fc6c4cef226f112e6776c76796c6eedec9caa67f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define MAX_NUM_OF_ROWS 1024
#define MAX_NUM_OF_COLS 1024
float Matrix_1[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
float Matrix_2[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
float Matrix_output[MAX_NUM_OF_ROWS][MAX_NUM_OF_COLS];
int no_of_rows_1 = 4;
int no_of_rows_2 = 4;
int no_of_cols_1 = 4;
int no_of_cols_2 = 4;
void ReadMatrix_1_2(void);
cudaError_t MultiplyWithCuda(void);
__global__ void MultiplyKernel(float *c, const float *a, const float *b, const int wc,const int hc, const int CommonDim)
{
	//each thread computes one output element: the dot product of one row of A with one
	//column of B, accumulated into a single value stored in C. Note: the indexing uses
	//CommonDim as the row stride of B and C, which only lines up when the common dimension
	//equals the output width (as with the square matrices used in this example)
float sum_tmp = 0;
	// compute the global row and column index: the thread index is offset by
	// blockIdx * blockDim so that each block covers its own tile of the output
int Row = blockIdx.x * blockDim.x + threadIdx.x;
int Col = blockIdx.y * blockDim.y + threadIdx.y;
if ((Row < wc) && (Col < hc)) {
for (int k = 0; k < CommonDim; k++)
{
sum_tmp += a[Row*CommonDim + k] * b[k * CommonDim + Col];
}
c[Row*CommonDim + Col] = sum_tmp;
}
}
int main()
{
// read 2 Matrix from Files
ReadMatrix_1_2();
// take time snap before multiplication
high_resolution_clock::time_point t1 = high_resolution_clock::now();
	//GPU multiplication Matrix_1 * Matrix_2
cudaError_t cudaStatus = MultiplyWithCuda();
// take time snap after multiplication
high_resolution_clock::time_point t2 = high_resolution_clock::now();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
// print the Time taken to Multiply two Matrices
auto duration = duration_cast<microseconds>(t2 - t1).count();
cout << "Multiplication Time CPU(us):" << duration << "\n";
//print the output matrix for testing
// for (int i = 0; i < no_of_cols_1; i++) {
// for (int j = 0; j < no_of_rows_2; j++)
// {
// cout << Matrix_output[i][j] << " ";
// }
// cout << "\n";
// }
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// this function reads the two matrices from two files with the dimensions given in the global variables set at the beginning of the file
void ReadMatrix_1_2(void) {
int x, y;
ifstream in_1("Matrix_1.txt", std::ifstream::in);
ifstream in_2("Matrix_2.txt", std::ifstream::in);
if (!in_1 || !in_2) {
cout << "Error! Cannot open file.\n";
return;
}
else if (no_of_cols_1 != no_of_rows_2) {
cout << "Error! Matrix dimensions is not valid for multiplication.\n";
return;
}
for (y = 0; y < no_of_cols_1; y++) {
for (x = 0; x < no_of_rows_1; x++) {
in_1 >> Matrix_1[x][y];
}
}
for (y = 0; y < no_of_cols_2; y++) {
for (x = 0; x < no_of_rows_2; x++) {
in_2 >> Matrix_2[x][y];
}
}
in_1.close();
in_2.close();
}
// Helper function that uses CUDA to multiply the two matrices in parallel.
cudaError_t MultiplyWithCuda()
{
float *dev_a = 0; //Matrix_1
float *dev_b = 0; //Matrix_2
float *dev_c = 0; //Matrix_output
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
	// Allocate GPU buffers for the three matrices (two inputs, one output).
cudaStatus = cudaMalloc((void**)&dev_c, no_of_rows_1*no_of_cols_2* sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, no_of_rows_1*no_of_cols_1 * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, no_of_rows_2*no_of_cols_2 * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
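	// Note: the host arrays are fixed 1024x1024, so element [x][y] lives at offset x*1024 + y.
	// These copies treat them as densely packed no_of_rows x no_of_cols blocks instead, which
	// only matches when the matrices are 1024 wide; for the small 4x4 example only the first
	// row of real data reaches the device, and the result copied back below has the same
	// layout mismatch.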
cudaStatus = cudaMemcpy(dev_a, Matrix_1, no_of_rows_1*no_of_cols_1 * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, Matrix_2, no_of_rows_2*no_of_cols_2 * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	// number of threads to run in parallel: no_of_rows_1 * no_of_cols_2
int no_of_parrallel_threads = no_of_rows_1 * no_of_cols_2;
	// thread and block definition without shared memory
dim3 threasPerBlock(no_of_parrallel_threads, no_of_parrallel_threads);
dim3 blocksPerGrid(1, 1);
	if (no_of_parrallel_threads >= 32) // cap the block at 32x32 = 1024 threads (the per-block hardware limit)
{
threasPerBlock.x = 32;
threasPerBlock.y = 32;
blocksPerGrid.x = ceil(double(no_of_rows_1)/ threasPerBlock.x);
blocksPerGrid.y = ceil(double(no_of_cols_2)/threasPerBlock.y);
}
// Launch a kernel on the GPU with one thread for each element.
MultiplyKernel <<< blocksPerGrid, threasPerBlock >>>(dev_c, dev_a, dev_b, no_of_rows_1 ,no_of_cols_2, no_of_cols_1);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
int output_width = no_of_parrallel_threads;
cudaStatus = cudaMemcpy(Matrix_output, dev_c, output_width * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
43751899cb195814c20fd43099dac2aec00e5969.hip | // !!! This is a file automatically generated by hipify!!!
#include "../Stopwatch.hpp"
#include <hip/hip_runtime.h>
// #include "const_common.h"
#define NUM_ELEMENTS 4096
#define CUDA_CALL(x) { \
	const hipError_t a = (x); \
	if (a != hipSuccess){ \
		printf("\nCUDA Error: %s (err_num=%d) \n", hipGetErrorString(a), a); \
		hipDeviceReset(); \
		/* assert(0); */ \
	} \
}
// INTERLEAVED
typedef struct
{
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
} INTERLEAVED_T;
typedef INTERLEAVED_T INTERLEAVED_ARRAY_T[NUM_ELEMENTS];
// NON INTERLEAVED
typedef uint32_t ARRAY_MEMBER_T[NUM_ELEMENTS];
typedef struct
{
ARRAY_MEMBER_T a;
ARRAY_MEMBER_T b;
ARRAY_MEMBER_T c;
ARRAY_MEMBER_T d;
} NON_INTERLAVED_T;
__host__ void add_test_non_interleaved_cpu(NON_INTERLAVED_T * const host_dest_ptr,
const NON_INTERLAVED_T * const host_src_ptr,
const uint32_t iter,
const uint32_t num_elements)
{
for(uint32_t tid = 0; tid < num_elements; tid++){
for(uint32_t i = 0; i < iter; i++){
host_dest_ptr->a[tid] += host_src_ptr->a[tid];
host_dest_ptr->b[tid] += host_src_ptr->b[tid];
host_dest_ptr->c[tid] += host_src_ptr->c[tid];
host_dest_ptr->d[tid] += host_src_ptr->d[tid];
}
}
}
__host__ void add_test_interleaved_cpu(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr,
const uint32_t iter,
const uint32_t num_elements)
{
for(uint32_t tid = 0; tid < num_elements; tid++){
for(uint32_t i = 0; i < iter; i++){
host_dest_ptr[tid].a += host_src_ptr[tid].a;
host_dest_ptr[tid].b += host_src_ptr[tid].b;
host_dest_ptr[tid].c += host_src_ptr[tid].c;
host_dest_ptr[tid].d += host_src_ptr[tid].d;
}
}
}
__global__ void add_kernel_interleaved(
INTERLEAVED_T * const dest_ptr,
const INTERLEAVED_T * const src_ptr,
const uint32_t iter,
const uint32_t num_elements
){
const uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid < num_elements){
for(int i = 0; i < iter; i++){
dest_ptr[tid].a += src_ptr[tid].a;
dest_ptr[tid].b += src_ptr[tid].b;
dest_ptr[tid].c += src_ptr[tid].c;
dest_ptr[tid].d += src_ptr[tid].d;
}
}
}
__global__ void add_kernel_non_interleaved(
NON_INTERLAVED_T * const dest_ptr,
const NON_INTERLAVED_T * const src_ptr,
const uint32_t iter,
const uint32_t num_elements
){
const uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid < num_elements){
for(int i = 0; i < iter; i++){
dest_ptr->a[tid] += src_ptr->a[tid];
dest_ptr->b[tid] += src_ptr->b[tid];
dest_ptr->c[tid] += src_ptr->c[tid];
dest_ptr->d[tid] += src_ptr->d[tid];
}
}
}
int main(int argc, char** argv){
Stopwatch stopwatch;
INTERLEAVED_T array_of_structs1[NUM_ELEMENTS];
INTERLEAVED_T array_of_structs2[NUM_ELEMENTS];
NON_INTERLAVED_T struct_of_arrays1;
NON_INTERLAVED_T struct_of_arrays2;
for(int i = 0; i < NUM_ELEMENTS; i++){
array_of_structs1[i].a = 1;
array_of_structs1[i].b = 2;
array_of_structs1[i].c = 3;
array_of_structs1[i].d = 4;
struct_of_arrays1.a[i] = 1;
struct_of_arrays1.b[i] = 2;
struct_of_arrays1.c[i] = 3;
struct_of_arrays1.d[i] = 4;
array_of_structs2[i].a = 0;
array_of_structs2[i].b = 0;
array_of_structs2[i].c = 0;
array_of_structs2[i].d = 0;
struct_of_arrays2.a[i] = 0;
struct_of_arrays2.b[i] = 0;
struct_of_arrays2.c[i] = 0;
struct_of_arrays2.d[i] = 0;
}
stopwatch.Start();
add_test_interleaved_cpu(array_of_structs2, array_of_structs1, 100, NUM_ELEMENTS);
stopwatch.Check_n_Reset("ARRAY OF STRUCTS");
add_test_non_interleaved_cpu(&struct_of_arrays2, &struct_of_arrays1, 100, NUM_ELEMENTS);
stopwatch.Check_n_Reset("STRUCT OF ARRAYS");
printf("CPU RESULTS INTERLEAVED: %d %d %d %d\n", array_of_structs2[64].a, array_of_structs2[64].b, array_of_structs2[64].c, array_of_structs2[64].d);
printf("CPU RESULTS NON INTERLEAVED: %d %d %d %d\n", struct_of_arrays2.a[64], struct_of_arrays2.b[64], struct_of_arrays2.c[64], struct_of_arrays2.d[64]);
for(int i = 0; i < NUM_ELEMENTS; i++){
array_of_structs2[i].a = 0;
array_of_structs2[i].b = 0;
array_of_structs2[i].c = 0;
array_of_structs2[i].d = 0;
        struct_of_arrays2.a[i] = 0;
        struct_of_arrays2.b[i] = 0;
        struct_of_arrays2.c[i] = 0;
        struct_of_arrays2.d[i] = 0;
}
const uint32_t num_threads = 64;
const uint32_t num_blocks = 64;
const uint32_t size_inter = NUM_ELEMENTS * sizeof(INTERLEAVED_T);
const uint32_t size_noninter = sizeof(NON_INTERLAVED_T);
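    // The two kernels do identical work; the benchmark compares the coalesced loads/stores of
    // the structure-of-arrays (non-interleaved) layout against the strided accesses produced
    // by the array-of-structures (interleaved) layout.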
// INTERLEAVED
INTERLEAVED_T * d_inter1; hipMalloc((void**)&d_inter1, size_inter);
INTERLEAVED_T * d_inter2; hipMalloc((void**)&d_inter2, size_inter);
hipDeviceSynchronize();
stopwatch.Start();
hipMemcpy(d_inter1, array_of_structs1, size_inter, hipMemcpyHostToDevice);
hipMemcpy(d_inter2, array_of_structs2, size_inter, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( add_kernel_interleaved) , dim3(num_blocks), dim3(num_threads), 0, 0, d_inter2, d_inter1, 100, NUM_ELEMENTS);
hipMemcpy(array_of_structs2, d_inter2, size_inter, hipMemcpyDeviceToHost);
stopwatch.Check_n_Reset("INTERLEAVED GPU: ");
hipFree(d_inter1);
hipFree(d_inter2);
// NON INTERLEAVED
NON_INTERLAVED_T * d_noninter1; hipMalloc((void**)&d_noninter1, size_noninter);
NON_INTERLAVED_T * d_noninter2; hipMalloc((void**)&d_noninter2, size_noninter);
hipDeviceSynchronize();
stopwatch.Start();
    hipMemcpy(d_noninter1, &struct_of_arrays1, size_noninter, hipMemcpyHostToDevice);
    hipMemcpy(d_noninter2, &struct_of_arrays2, size_noninter, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( add_kernel_non_interleaved) , dim3(num_blocks), dim3(num_threads), 0, 0, d_noninter2, d_noninter1, 100, NUM_ELEMENTS);
    hipMemcpy(&struct_of_arrays2, d_noninter2, size_noninter, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopwatch.Check_n_Reset("NON INTERLEAVED GPU: ");
hipFree(d_noninter1);
hipFree(d_noninter2);
for(int i = 2; i < 3; i++){
printf("GPU RESULTS INTERLEAVED: %d %d %d %d\n", array_of_structs2[i].a, array_of_structs2[i].b, array_of_structs2[i].c, array_of_structs2[i].d);
}
for(int i = 2; i < 3; i++){
printf("GPU RESULTS NON INTERLEAVED: %d %d %d %d\n", struct_of_arrays2.a[i], struct_of_arrays2.b[i], struct_of_arrays2.c[i], struct_of_arrays2.d[i]);
}
return 0;
}
| 43751899cb195814c20fd43099dac2aec00e5969.cu | #include "../Stopwatch.hpp"
#include <cuda.h>
// #include "const_common.h"
#define NUM_ELEMENTS 4096
#define CUDA_CALL(x) { \
	const cudaError_t a = (x); \
	if (a != cudaSuccess){ \
		printf("\nCUDA Error: %s (err_num=%d) \n", cudaGetErrorString(a), a); \
		cudaDeviceReset(); \
		/* assert(0); */ \
	} \
}
// INTERLEAVED
typedef struct
{
uint32_t a;
uint32_t b;
uint32_t c;
uint32_t d;
} INTERLEAVED_T;
typedef INTERLEAVED_T INTERLEAVED_ARRAY_T[NUM_ELEMENTS];
// NON INTERLEAVED
typedef uint32_t ARRAY_MEMBER_T[NUM_ELEMENTS];
typedef struct
{
ARRAY_MEMBER_T a;
ARRAY_MEMBER_T b;
ARRAY_MEMBER_T c;
ARRAY_MEMBER_T d;
} NON_INTERLAVED_T;
__host__ void add_test_non_interleaved_cpu(NON_INTERLAVED_T * const host_dest_ptr,
const NON_INTERLAVED_T * const host_src_ptr,
const uint32_t iter,
const uint32_t num_elements)
{
for(uint32_t tid = 0; tid < num_elements; tid++){
for(uint32_t i = 0; i < iter; i++){
host_dest_ptr->a[tid] += host_src_ptr->a[tid];
host_dest_ptr->b[tid] += host_src_ptr->b[tid];
host_dest_ptr->c[tid] += host_src_ptr->c[tid];
host_dest_ptr->d[tid] += host_src_ptr->d[tid];
}
}
}
__host__ void add_test_interleaved_cpu(INTERLEAVED_T * const host_dest_ptr,
const INTERLEAVED_T * const host_src_ptr,
const uint32_t iter,
const uint32_t num_elements)
{
for(uint32_t tid = 0; tid < num_elements; tid++){
for(uint32_t i = 0; i < iter; i++){
host_dest_ptr[tid].a += host_src_ptr[tid].a;
host_dest_ptr[tid].b += host_src_ptr[tid].b;
host_dest_ptr[tid].c += host_src_ptr[tid].c;
host_dest_ptr[tid].d += host_src_ptr[tid].d;
}
}
}
__global__ void add_kernel_interleaved(
INTERLEAVED_T * const dest_ptr,
const INTERLEAVED_T * const src_ptr,
const uint32_t iter,
const uint32_t num_elements
){
const uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid < num_elements){
for(int i = 0; i < iter; i++){
dest_ptr[tid].a += src_ptr[tid].a;
dest_ptr[tid].b += src_ptr[tid].b;
dest_ptr[tid].c += src_ptr[tid].c;
dest_ptr[tid].d += src_ptr[tid].d;
}
}
}
__global__ void add_kernel_non_interleaved(
NON_INTERLAVED_T * const dest_ptr,
const NON_INTERLAVED_T * const src_ptr,
const uint32_t iter,
const uint32_t num_elements
){
const uint32_t tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if(tid < num_elements){
for(int i = 0; i < iter; i++){
dest_ptr->a[tid] += src_ptr->a[tid];
dest_ptr->b[tid] += src_ptr->b[tid];
dest_ptr->c[tid] += src_ptr->c[tid];
dest_ptr->d[tid] += src_ptr->d[tid];
}
}
}
int main(int argc, char** argv){
Stopwatch stopwatch;
INTERLEAVED_T array_of_structs1[NUM_ELEMENTS];
INTERLEAVED_T array_of_structs2[NUM_ELEMENTS];
NON_INTERLAVED_T struct_of_arrays1;
NON_INTERLAVED_T struct_of_arrays2;
for(int i = 0; i < NUM_ELEMENTS; i++){
array_of_structs1[i].a = 1;
array_of_structs1[i].b = 2;
array_of_structs1[i].c = 3;
array_of_structs1[i].d = 4;
struct_of_arrays1.a[i] = 1;
struct_of_arrays1.b[i] = 2;
struct_of_arrays1.c[i] = 3;
struct_of_arrays1.d[i] = 4;
array_of_structs2[i].a = 0;
array_of_structs2[i].b = 0;
array_of_structs2[i].c = 0;
array_of_structs2[i].d = 0;
struct_of_arrays2.a[i] = 0;
struct_of_arrays2.b[i] = 0;
struct_of_arrays2.c[i] = 0;
struct_of_arrays2.d[i] = 0;
}
stopwatch.Start();
add_test_interleaved_cpu(array_of_structs2, array_of_structs1, 100, NUM_ELEMENTS);
stopwatch.Check_n_Reset("ARRAY OF STRUCTS");
add_test_non_interleaved_cpu(&struct_of_arrays2, &struct_of_arrays1, 100, NUM_ELEMENTS);
stopwatch.Check_n_Reset("STRUCT OF ARRAYS");
printf("CPU RESULTS INTERLEAVED: %d %d %d %d\n", array_of_structs2[64].a, array_of_structs2[64].b, array_of_structs2[64].c, array_of_structs2[64].d);
printf("CPU RESULTS NON INTERLEAVED: %d %d %d %d\n", struct_of_arrays2.a[64], struct_of_arrays2.b[64], struct_of_arrays2.c[64], struct_of_arrays2.d[64]);
for(int i = 0; i < NUM_ELEMENTS; i++){
array_of_structs2[i].a = 0;
array_of_structs2[i].b = 0;
array_of_structs2[i].c = 0;
array_of_structs2[i].d = 0;
        struct_of_arrays2.a[i] = 0;
        struct_of_arrays2.b[i] = 0;
        struct_of_arrays2.c[i] = 0;
        struct_of_arrays2.d[i] = 0;
}
const uint32_t num_threads = 64;
const uint32_t num_blocks = 64;
const uint32_t size_inter = NUM_ELEMENTS * sizeof(INTERLEAVED_T);
const uint32_t size_noninter = sizeof(NON_INTERLAVED_T);
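    // The two kernels do identical work; the benchmark compares the coalesced loads/stores of
    // the structure-of-arrays (non-interleaved) layout against the strided accesses produced
    // by the array-of-structures (interleaved) layout.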
// INTERLEAVED
INTERLEAVED_T * d_inter1; cudaMalloc((void**)&d_inter1, size_inter);
INTERLEAVED_T * d_inter2; cudaMalloc((void**)&d_inter2, size_inter);
cudaDeviceSynchronize();
stopwatch.Start();
cudaMemcpy(d_inter1, array_of_structs1, size_inter, cudaMemcpyHostToDevice);
cudaMemcpy(d_inter2, array_of_structs2, size_inter, cudaMemcpyHostToDevice);
    add_kernel_interleaved <<<num_blocks, num_threads>>> (d_inter2, d_inter1, 100, NUM_ELEMENTS);
cudaMemcpy(array_of_structs2, d_inter2, size_inter, cudaMemcpyDeviceToHost);
stopwatch.Check_n_Reset("INTERLEAVED GPU: ");
cudaFree(d_inter1);
cudaFree(d_inter2);
// NON INTERLEAVED
NON_INTERLAVED_T * d_noninter1; cudaMalloc((void**)&d_noninter1, size_noninter);
NON_INTERLAVED_T * d_noninter2; cudaMalloc((void**)&d_noninter2, size_noninter);
cudaDeviceSynchronize();
stopwatch.Start();
    cudaMemcpy(d_noninter1, &struct_of_arrays1, size_noninter, cudaMemcpyHostToDevice);
    cudaMemcpy(d_noninter2, &struct_of_arrays2, size_noninter, cudaMemcpyHostToDevice);
    add_kernel_non_interleaved <<<num_blocks, num_threads>>> (d_noninter2, d_noninter1, 100, NUM_ELEMENTS);
    cudaMemcpy(&struct_of_arrays2, d_noninter2, size_noninter, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopwatch.Check_n_Reset("NON INTERLEAVED GPU: ");
cudaFree(d_noninter1);
cudaFree(d_noninter2);
for(int i = 2; i < 3; i++){
printf("GPU RESULTS INTERLEAVED: %d %d %d %d\n", array_of_structs2[i].a, array_of_structs2[i].b, array_of_structs2[i].c, array_of_structs2[i].d);
}
for(int i = 2; i < 3; i++){
printf("GPU RESULTS NON INTERLEAVED: %d %d %d %d\n", struct_of_arrays2.a[i], struct_of_arrays2.b[i], struct_of_arrays2.c[i], struct_of_arrays2.d[i]);
}
return 0;
}
|
073cc57b441f356dc57cc89e404321b785743ee8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _DEBUG
#define NUTTY_DEBUG
#else
#undef NUTTY_DEBUG
#endif
#undef NUTTY_DEBUG
// #include <thrust/sort.h>
// #include <thrust/detail/type_traits.h>
//disable / enable optimization
#undef PROFILE
#define USE_STREAM
#define USE_OPT_RED
#define USE_OPT_SCAN
#define USE_CLIP_MASK
#define USE_HYBRID_REDUCTION
#undef USE_HYBRID_COMPACTION
#if 0
#undef USE_STREAM
#undef USE_OPT_RED
#undef USE_OPT_SCAN
#undef USE_CLIP_MASK
#undef USE_HYBRID_REDUCTION
#endif
#undef FAST_COMPACTION
#define FAST_COMP_PROCS (CTuint)64
#define FAST_COMP_LANE_SIZE 256
#include <cutil_math.h>
#include "cuKDTree.h"
static CTuint hybridChangeDepth = 17;
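// Depth at which the tree builder changes its per-level strategy (presumably tied to the
// hybrid reduction/compaction toggles above); adjustable at runtime via SetHCDepth().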
EventLine::EventLine(void) : toggleIndex(0), nodeIndex(NULL)
{
rawEvents = new thrust::device_vector<float>();
eventKeys = new thrust::device_vector<unsigned int>();
}
EventLine::~EventLine(void)
{
delete rawEvents;
delete eventKeys;
}
void cuKDTreeScan::SetHCDepth(uint d)
{
hybridChangeDepth = d;
}
#if 1
#include <cuda_occupancy.h>
#include "kd_scan_kernel.h"
#include "shared_kernel.h"
#include "shared_types.h"
#include <Reduce.h>
#include "nopt_reduce.h"
#include <Sort.h>
#include <Scan.h>
#include <queue>
#include <ForEach.h>
#include <Fill.h>
#include <cuda/Globals.cuh>
#include "buffer_print.h"
#include <chimera/Timer.h>
#include <fstream>
#ifdef FAST_COMPACTION
#define PATH 0
#define FUNCNAME 0
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 1
#define FUNCNAME 1
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 2
#define FUNCNAME 2
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 3
#define FUNCNAME 3
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 4
#define FUNCNAME 4
#include "generic_kernel.h"
#endif
#ifdef PROFILE
#define PROFILE_FRAMES 32
#define PROFILE_START chimera::util::HTimer timer; hipDeviceSynchronize(); timer.Start()
#define PROFILE_END hipDeviceSynchronize(); timer.Stop(); g_time += timer.GetMillis()
std::map<std::string, double> g_profileTable;
#define KERNEL_PROFILE(_name, _info) \
{ \
auto it = g_profileTable.find(std::string(#_info));\
if(it == g_profileTable.end()) { g_profileTable.insert(std::pair<std::string, double>(std::string(#_info), 0)); }\
chimera::util::HTimer timer;\
hipDeviceSynchronize();\
timer.Start();\
_name; \
hipDeviceSynchronize();\
timer.Stop();\
it = g_profileTable.find(std::string(#_info));\
it->second += timer.GetMillis();\
}
#else
#define PROFILE_START
#define PROFILE_END
#define KERNEL_PROFILE(_name, _info) _name
#endif
#undef PRINT_OUT
#ifndef _DEBUG
#undef PRINT_OUT
#endif
#ifndef PRINT_OUT
#undef PRINT_BUFFER
#undef PRINT_BUFFER_N
#undef PRINT_RAW_BUFFER
#undef PRINT_RAW_BUFFER_N
#undef ct_printf
#define PRINT_BUFFER(_name)
#define PRINT_BUFFER_N(_name, _tmp)
#define PRINT_RAW_BUFFER(_name)
#define PRINT_RAW_BUFFER_N(_name, _N)
#define ct_printf(...)
#endif
//#define NODES_GROUP_SIZE 128U
#define EVENT_GROUP_SIZE 256U
void SortEvents(EventLines* eventLine);
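// Small RAII helper holding four device-side uints that kernels can write error codes into;
// check() copies them back, reports any non-zero value and clears the buffer again.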
struct cudaErrorBuffer
{
CTuint* devMemory;
cudaErrorBuffer(void)
{
hipMalloc(&devMemory, 4 * sizeof(CTuint));
CTuint null = 0;
hipMemcpy(devMemory, &null, 4, hipMemcpyHostToDevice);
}
bool check(void)
{
CTuint hostMemory[4];
hipMemcpy(&hostMemory, devMemory, 4 * sizeof(CTuint), hipMemcpyDeviceToHost);
if(hostMemory[0])
{
__ct_printf("GOT ERROR = %d %d %d %d\n", hostMemory[0], hostMemory[1], hostMemory[2], hostMemory[3]);
//__debugbreak();
return true;
}
CTuint null = 0;
hipMemcpy(devMemory, &null, 4, hipMemcpyHostToDevice);
return false;
}
~cudaErrorBuffer(void)
{
hipFree(devMemory);
}
};
void EventLine::Resize(CTuint size)
{
if(indexedEvent.Size() >= size)
{
return;
}
size = (CTuint)(1.2 * size);
typeStartScanned.Resize(size);
scannedEventTypeStartMask.Resize(size);
scannedEventTypeStartMaskSums.Resize(size);
eventScanner.Resize(size);
typeStartScanner.Resize(size);
mask.Resize(size);
indexedEvent.Resize(size);
type.Resize(size);
nodeIndex->Resize(size);
primId.Resize(size);
ranges.Resize(size);
}
size_t EventLine::Size(void)
{
return indexedEvent.Size();
}
cuEventLine EventLine::GetPtr(CTbyte index)
{
cuEventLine events;
events.indexedEvent = indexedEvent.Begin(index)();
events.type = type.Begin(index)();
events.nodeIndex = nodeIndex->Begin(index)();
events.primId = primId.Begin(index)();
events.ranges = ranges.Begin(index)();
events.mask = mask.GetPointer();
//events.scannedEventTypeStartMask = typeStartScanner.GetPrefixSum().GetConstPointer();
events.scannedEventTypeStartMask = typeStartScanned.GetConstPointer();
//events.scannedEventTypeEndMask = scannedEventTypeEndMask.Begin()();
return events;
}
cuConstEventLine EventLine::GetConstPtr(CTbyte index)
{
cuConstEventLine events;
events.indexedEvent = indexedEvent.Begin(index)();
events.type = type.Begin(index)();
events.nodeIndex = nodeIndex->Begin(index)();
events.primId = primId.Begin(index)();
events.ranges = ranges.Begin(index)();
events.mask = mask.GetPointer();
//events.scannedEventTypeStartMask = typeStartScanner.GetPrefixSum().GetConstPointer();
events.scannedEventTypeStartMask = typeStartScanned.GetConstPointer();
//events.scannedEventTypeEndMask = scannedEventTypeEndMask.Begin()();
return events;
}
template<>
struct ShrdMemory<CTuint3>
{
__device__ CTuint3* Ptr(void)
{
extern __device__ __shared__ CTuint3 s_b4[];
return s_b4;
}
};
double g_time = 0;
void PrintEventLine(EventLine& line, CTuint l)
{
ct_printf("PrintEventLine\n");
// PRINT_BUFFER_N(line.indexedEvent[line.toggleIndex], l);
///PRINT_BUFFER_N(line.nodeIndex[line.toggleIndex], l);
// PRINT_BUFFER_N(line.prefixSum[line.toggleIndex], l);
// PRINT_BUFFER_N(line.primId[line.toggleIndex], l);
// PRINT_BUFFER_N(line.type[line.toggleIndex], l);
ct_printf("End\n");
}
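// Multi-level prefix scan over three independent streams (one per axis): each block scans its
// chunk and emits a per-block sum, the per-block sums are scanned in a second pass, and the
// scanned sums are finally spread back across the per-block results.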
template <typename Operator, typename T>
void ScanTriples(ConstTuple<3, T>& src, Tuple<3, CTuint>& scanned, Tuple<3, CTuint>& sums, CTuint N, Operator op, hipStream_t pStream)
{
static const CTuint block = 256;
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
CTuint grid = nutty::cuda::GetCudaGrid(N, block);
hipLaunchKernelGGL(( tripleGroupScan<block>), dim3(grid), dim3(block), 0, pStream,
src, scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(N, block);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
hipLaunchKernelGGL(( completeScan2<1024, 3>), dim3(3), dim3(1024), 0, pStream, constSums, sums, _op, sumsCount);
DEVICE_SYNC_CHECK();
hipLaunchKernelGGL(( spreadScannedSums), dim3(grid-1), dim3(block), 0, pStream, scanned, sums, N);
DEVICE_SYNC_CHECK();
}
}
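// Same scheme for binary (0/1) inputs. With USE_OPT_SCAN the flags are consumed four at a time
// as uchar4/uint4, letting each thread handle several elements per pass.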
template <typename Operator, typename T>
void ScanBinaryTriples(ConstTuple<3, T>& src, Tuple<3, CTuint>& scanned, Tuple<3, CTuint>& sums, CTuint N, Operator op, hipStream_t pStream)
{
#ifdef USE_OPT_SCAN
static const CTuint block = 256;
ConstTuple<3, uchar4> _src;
_src.ts[0] = (uchar4*)src.ts[0];
_src.ts[1] = (uchar4*)src.ts[1];
_src.ts[2] = (uchar4*)src.ts[2];
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
Tuple<3, uint4> _scanned;
_scanned.ts[0] = (uint4*)scanned.ts[0];
_scanned.ts[1] = (uint4*)scanned.ts[1];
_scanned.ts[2] = (uint4*)scanned.ts[2];
const uint BLOCK_SIZE = 256;
const uint elemsPerBlock = 256; //more than 256?
const uint elemsPerThread = 4 * elemsPerBlock / BLOCK_SIZE;
const uint scannedElemsPerBlock = elemsPerThread * BLOCK_SIZE;
uint grid = nutty::cuda::GetCudaGrid(N, scannedElemsPerBlock);
hipLaunchKernelGGL(( binaryTripleGroupScan<block, elemsPerBlock>), dim3(grid), dim3(BLOCK_SIZE), 0, pStream,
_src, _scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
if(grid > 1)
{
#if 1
nutty::PrefixSumOp<CTuint> _op;
hipLaunchKernelGGL(( completeScan2<1024, 3>), dim3(3), dim3(1024), 0, pStream, constSums, sums, _op, grid);
DEVICE_SYNC_CHECK();
#else
CTuint shrdStepElemperThread = nutty::cuda::GetCudaGrid(sumsCount, 256U);
switch(shrdStepElemperThread)
{
case 1:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 2:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 3:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 4:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 5:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 6:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 7:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 8:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 9:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 10:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 11:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 12:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
default: __ct_printf("error\n"); exit(0); break;
};
#endif
//spreadScannedSums4<<<grid-1, block, 0, pStream>>>(scanned, sums, N, scannedElemsPerBlock);
uint elems = (N - BLOCK_SIZE * elemsPerThread + (N%2));
uint g = nutty::cuda::GetCudaGrid(elems, BLOCK_SIZE);
hipLaunchKernelGGL(( spreadScannedSums4t), dim3(g), dim3(BLOCK_SIZE), 0, 0, scanned, sums, elems, scannedElemsPerBlock);
DEVICE_SYNC_CHECK();
}
#else
static const CTuint block = 128;
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
CTuint grid = nutty::cuda::GetCudaGrid(N, block);
const uint elemsPerThread = 1;
const uint scannedElemsPerBlock = elemsPerThread * block;
grid = nutty::cuda::GetCudaGrid(N, scannedElemsPerBlock);
hipLaunchKernelGGL(( binaryTripleGroupScanNoOpt<block>), dim3(grid), dim3(block), 0, pStream,
src, scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
CTuint sumsCount = grid;
if(sumsCount > 1)
{
#if 1
nutty::PrefixSumOp<CTuint> _op;
hipLaunchKernelGGL(( completeScan2NoOpt<1024, 3>), dim3(3), dim3(1024), 0, pStream, constSums, sums, _op, sumsCount);
DEVICE_SYNC_CHECK();
#else
CTuint shrdStepElemperThread = nutty::cuda::GetCudaGrid(sumsCount, 256U);
switch(shrdStepElemperThread)
{
case 1:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 2:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 3:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 4:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 5:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 6:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 7:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 8:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 9:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 10:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 11:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
case 12:hipLaunchKernelGGL(( completeScan<256>), dim3(3), dim3(256), 0, pStream, constSums, sums, op, sumsCount); break;
default: __ct_printf("error\n"); exit(0); break;
};
#endif
hipLaunchKernelGGL(( spreadScannedSums), dim3(grid-1), dim3(block), 0, pStream, scanned, sums, N);
// const CTuint k = 2;
// CTuint elems = (N - block * elemsPerThread + (N%2)) / k;
// grid = nutty::cuda::GetCudaGrid(elems, block);
// spreadScannedSums2<<<grid, block, 0, pStream>>>(scanned, sums, elems);
DEVICE_SYNC_CHECK();
}
#endif
}
void cuKDTreeScan::InitBuffer(void)
{
CTuint primitiveCount = (CTuint)(m_orginalVertices.Size() / 3);
m_depth = (byte)min(64, max(1, (m_depth == 0xFF ? GenerateDepth(primitiveCount) : m_depth)));
m_primAABBs.Resize(primitiveCount); nutty::ZeroMem(m_primAABBs);
for(int i = 0; i < 3; ++i)
{
// m_events3[i].SetNodeIndexBuffer(&m_eventNodeIndex);
m_eventLines.eventLines[i].SetNodeIndexBuffer(&m_eventNodeIndex);
}
GrowNodeMemory();
GrowPerLevelNodeMemory(64);
GrowSplitMemory(4 * primitiveCount);
ClearBuffer();
m_dthAsyncIntCopy.Init(2);
m_dthAsyncByteCopy.Init(2);
m_dthAsyncNodesContent.Init(100);
m_gotLeaves.Resize(1);
}
void cuKDTreeScan::ClearBuffer(void)
{
nutty::ZeroMem(m_nodesBBox[0]);
nutty::ZeroMem(m_nodesBBox[1]);
nutty::ZeroMem(m_nodes_ContentCount);
nutty::ZeroMem(m_nodes_IsLeaf);
nutty::ZeroMem(m_nodes_Split);
nutty::ZeroMem(m_nodes_ContentStartAdd);
nutty::ZeroMem(m_nodes_SplitAxis);
nutty::ZeroMem(m_nodes_LeftChild);
nutty::ZeroMem(m_nodes_RightChild);
nutty::ZeroMem(m_splits_Above);
nutty::ZeroMem(m_splits_Below);
nutty::ZeroMem(m_splits_Axis);
nutty::ZeroMem(m_splits_Plane);
nutty::ZeroMem(m_leafNodesContentCount);
nutty::ZeroMem(m_leafNodesContentStart);
}
void cuKDTreeScan::GrowPerLevelNodeMemory(CTuint newSize)
{
m_activeNodesIsLeaf.Resize(newSize);
CTnodeIsLeaf_t* ptr = m_activeNodesIsLeaf.GetPointer();
hipMemcpyToSymbolAsync(d_activeNodesIsLeaf, &ptr, sizeof(CTnodeIsLeaf_t*), 0, hipMemcpyHostToDevice, m_pStream);
m_activeNodes.Resize(newSize);
m_activeNodesThisLevel.Resize(newSize);
m_newActiveNodes.Resize(newSize);
m_nodesBBox.Resize(newSize);
m_nodes_ContentStartAdd.Resize(newSize);
m_nodes_ContentCount.Resize(newSize);
m_nodes.isLeaf = m_nodes_IsLeaf.GetDevicePtr()();
m_nodes.splitAxis = m_nodes_SplitAxis.GetDevicePtr()();
m_nodes.split = m_nodes_Split.GetDevicePtr()();
m_nodes.contentStart = m_nodes_ContentStartAdd.GetDevicePtr()();
m_nodes.contentCount = m_nodes_ContentCount.GetDevicePtr()();
m_nodes.leftChild = m_nodes_LeftChild.GetDevicePtr()();
m_nodes.rightChild = m_nodes_RightChild.GetDevicePtr()();
m_nodes.nodeToLeafIndex = m_nodes_NodeIdToLeafIndex.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(hipMemcpyToSymbolAsync(g_nodes, &m_nodes, sizeof(Node), 0, hipMemcpyHostToDevice, m_pStream));
}
void cuKDTreeScan::GrowNodeMemory(void)
{
size_t newSize = m_nodes_IsLeaf.Size() ? m_nodes_IsLeaf.Size() * 4 : 32;
m_nodes_IsLeaf.Resize(newSize);
m_nodes_Split.Resize(newSize);
m_nodes_NodeIdToLeafIndex.Resize(newSize);
m_nodes_SplitAxis.Resize(newSize);
m_nodes_LeftChild.Resize(newSize);
m_nodes_RightChild.Resize(newSize);
m_nodes.isLeaf = m_nodes_IsLeaf.GetDevicePtr()();
m_nodes.splitAxis = m_nodes_SplitAxis.GetDevicePtr()();
m_nodes.split = m_nodes_Split.GetDevicePtr()();
m_nodes.contentStart = m_nodes_ContentStartAdd.GetDevicePtr()();
m_nodes.contentCount = m_nodes_ContentCount.GetDevicePtr()();
m_nodes.leftChild = m_nodes_LeftChild.GetDevicePtr()();
m_nodes.rightChild = m_nodes_RightChild.GetDevicePtr()();
m_nodes.nodeToLeafIndex = m_nodes_NodeIdToLeafIndex.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(hipMemcpyToSymbolAsync(g_nodes, &m_nodes, sizeof(Node), 0, hipMemcpyHostToDevice, m_pStream));
}
void cuKDTreeScan::GrowSplitMemory(CTuint eventCount)
{
m_splits_Above.Resize(eventCount);
m_splits_Below.Resize(eventCount);
m_splits_Axis.Resize(eventCount);
m_splits_Plane.Resize(eventCount);
m_splits_IndexedSplit.Resize(eventCount);
m_eventIsLeaf.Resize(eventCount);
m_splits.above = m_splits_Above.GetDevicePtr()();
m_splits.below = m_splits_Below.GetDevicePtr()();
m_splits.axis = m_splits_Axis.GetDevicePtr()();
m_splits.indexedSplit = m_splits_IndexedSplit.GetDevicePtr()();
m_splits.v = m_splits_Plane.GetDevicePtr()();
m_eventNodeIndex.Resize(eventCount);
CUDA_RT_SAFE_CALLING_NO_SYNC(hipMemcpyToSymbolAsync(g_splits, &m_splits, sizeof(Split), 0, hipMemcpyHostToDevice, m_pStream));
SplitConst splitsConst;
splitsConst.above = m_splits_Above.GetDevicePtr()();
splitsConst.below = m_splits_Below.GetDevicePtr()();
splitsConst.axis = m_splits_Axis.GetDevicePtr()();
splitsConst.indexedSplit = m_splits_IndexedSplit.GetDevicePtr()();
splitsConst.v = m_splits_Plane.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(hipMemcpyToSymbolAsync(g_splitsConst, &splitsConst, sizeof(SplitConst), 0, hipMemcpyHostToDevice, m_pStream));
}
void cuKDTreeScan::PrintStatus(const char* msg /* = NULL */)
{
ct_printf("PrintStatus: %s\n", msg == NULL ? "" : msg);
PRINT_BUFFER(m_nodes_ContentCount);
PRINT_BUFFER(m_nodes_ContentStartAdd);
}
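// Runs the fused binary scan over the event-type masks of all three axes at once; the scanned
// masks are what the SAH kernel later uses to count primitives on each side of a candidate plane.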
void cuKDTreeScan::ScanEventTypesTriples(CTuint eventCount)
{
CTbyte add = m_eventLines.toggleIndex;
ConstTuple<3, CTeventType_t> ptr;
ptr.ts[0] = m_eventLines.eventLines[0].type[add].GetConstPointer();
ptr.ts[1] = m_eventLines.eventLines[1].type[add].GetConstPointer();
ptr.ts[2] = m_eventLines.eventLines[2].type[add].GetConstPointer();
Tuple<3, CTuint> ptr1;
ptr1.ts[0] = m_eventLines.eventLines[0].typeStartScanned.GetPointer();
ptr1.ts[1] = m_eventLines.eventLines[1].typeStartScanned.GetPointer();
ptr1.ts[2] = m_eventLines.eventLines[2].typeStartScanned.GetPointer();
Tuple<3, CTuint> sums;
sums.ts[0] = m_eventLines.eventLines[0].scannedEventTypeStartMaskSums.GetPointer();
sums.ts[1] = m_eventLines.eventLines[1].scannedEventTypeStartMaskSums.GetPointer();
sums.ts[2] = m_eventLines.eventLines[2].scannedEventTypeStartMaskSums.GetPointer();
nutty::PrefixSumOp<CTeventType_t> op;
ScanBinaryTriples(ptr, ptr1, sums, eventCount, op, m_pStream);
// PrintBuffer(m_eventLines.eventLines[0].scannedEventTypeStartMaskSums, 7);
// PrintBuffer(m_eventLines.eventLines[0].typeStartScanned, eventCount);
// __debugbreak();
}
void cuKDTreeScan::ComputeSAH_Splits(
CTuint nodeCount,
CTuint eventCount,
const CTuint* nodesContentCount)
{
CTuint eventBlock = EVENT_GROUP_SIZE;
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
//cuEventLineTriple tripleLine(m_events3, 0);
CTuint start = 0;
//m_pool.Reset();
//m_typeScanner.Resize(eventCount);
//m_types3.Resize(eventCount);
#if 0
for(CTbyte i = 0; i < 3; ++i)
{
// // m_eventLines.eventLines[i].Resize(eventCount);
m_eventLines.eventLines[i].ScanEventTypes(eventCount);
// //PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum(), eventCount);
// // PRINT_RAW_BUFFER(m_events3[i].tmpType);
// // OutputDebugStringA("\n");
// //nutty::ZeroMem(m_eventLines.eventLines[i].typeStartScanned);
//
}
#endif
// static EventStartScanOp<CTbyte> op0;
// for(CTbyte k = 0; k < 3; ++k)
// {
// groupScan<256U, CTbyte, CTuint, EventStartScanOp<CTbyte>> <<<eventGrid, eventBlock>>>(
// m_eventLines.eventLines[k].type[m_eventLines.toggleIndex].GetConstPointer(),
// m_eventLines.eventLines[k].typeStartScanned.GetPointer(),
// sums.GetPointer(),
// op0, eventCount);
// }
//nutty::ZeroMem(m_eventLines.eventLines[0].scannedEventTypeEndMaskSums);
ScanEventTypesTriples(eventCount);
DEVICE_SYNC_CHECK();
// PrintBuffer(m_eventLines.eventLines[0].typeStartScanned);
// PrintBuffer(m_eventLines.eventLines[1].typeStartScanned);
// PrintBuffer(m_eventLines.eventLines[2].typeStartScanned);
#if 0
for(CTbyte i = 0; i < 3; ++i)
{
nutty::HostBuffer<CTuint> tmp0(eventCount);
nutty::HostBuffer<CTuint> tmp1(eventCount);
nutty::Copy(tmp0.Begin(), m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum().Begin(), m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum().Begin() + eventCount);
nutty::Copy(tmp1.Begin(), m_eventLines.eventLines[i].typeStartScanned.Begin(), m_eventLines.eventLines[i].typeStartScanned.Begin() + eventCount);
for(int k = 0; k < eventCount; ++k)
{
if(tmp1[k] != tmp0[k])
{
__ct_printf("error: %d %d %d %d\n", tmp1[k], tmp0[k], k, i);
//exit(0);
const CTuint block = 512; //nutty::cuda::GetCudaBlock(N, 256U);
CTuint grid = nutty::cuda::GetCudaGrid(eventCount, block);
size_t sumSize = (eventCount % nutty::cuda::SCAN_ELEMS_PER_BLOCK) == 0 ? eventCount / nutty::cuda::SCAN_ELEMS_PER_BLOCK : (eventCount / nutty::cuda::SCAN_ELEMS_PER_BLOCK) + 1;
PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].scannedEventTypeEndMaskSums, sumSize);
PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].typeStartScanner.m_scannedSums, sumSize);
exit(0);
}
}
}
#endif
DEVICE_SYNC_CHECK();
const CTuint elemsPerThread = 1;
CTuint N = eventCount;//nutty::cuda::GetCudaGrid(eventCount, elemsPerThread);
CTuint sahBlock = EVENT_GROUP_SIZE;
CTuint sahGrid = nutty::cuda::GetCudaGrid(N, sahBlock);
// computeSAHSplits3<1, elemsPerThread><<<sahGrid, sahBlock, 0, m_pStream>>>(
// nodesContentCount,
// m_nodes_ContentStartAdd.GetConstPointer(),
// m_nodesBBox[0].GetConstPointer(),
// eventCount,
// m_eventLines.toggleIndex);
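    // evaluate the SAH cost of every event plane using the node bounding boxes, the per-node
    // content counts/starts and the scanned type masks computed above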
cudaComputeSAHSplits3(
nodesContentCount,
m_nodes_ContentStartAdd.GetConstPointer(),
m_nodesBBox[0].GetConstPointer(),
eventCount,
m_eventLines.toggleIndex,
sahGrid, sahBlock, m_pStream);
// computeSAHSplits3Old<<<sahGrid, sahBlock, 0, m_pStream>>>(
// nodesContentCount,
// m_nodes_ContentStartAdd.Begin()(),
// m_nodesBBox[0].Begin()(),
// eventCount,
// m_eventLines.toggleIndex);
DEVICE_SYNC_CHECK();
#if 0
for(int i = 0; i < eventCount; ++i)
{
ct_printf("%d [%d %d] id=%d Axis=%d, Plane=%f SAH=%f :: \n",
i, m_splits_Below[i], m_splits_Above[i],
m_splits_IndexedSplit[i].index,
(CTuint)m_splits_Axis[i],
m_splits_Plane[i],
(m_splits_IndexedSplit[i].sah == INVALID_SAH ? -1 : m_splits_IndexedSplit[i].sah));
//BBox bbox = m_nodesBBox[0][ m_events3[0].nodeIndex[m_events3[0].toggleIndex][i] ];
//ct_printf("%f %f %f | %f %f %f\n", bbox.m_min.x, bbox.m_min.y, bbox.m_min.z, bbox.m_max.x, bbox.m_max.y, bbox.m_max.z);
}
#endif
//start = 0;
//m_pool.Reset();
if(nodeCount == 1)
{
IndexedSAHSplit neutralSplit;
neutralSplit.index = 0;
neutralSplit.sah = FLT_MAX;
#ifdef USE_OPT_RED
ReduceOpt(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin(), eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
//nutty::Reduce(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin() + eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
#else
ReduceNoOpt(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin(), eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
#endif
DEVICE_SYNC_CHECK();
}
#ifndef USE_HYBRID_REDUCTION
else if(true)
#else
else if(nodeCount < 0)
#endif
{
//m_hNodesContentCount.Resize(nodeCount);
//nutty::Copy(m_hNodesContentCount.Begin(), m_nodes_ContentCount.Begin(), nodeCount);
m_dthAsyncNodesContent.WaitForCopy();
for(CTuint i = 0; i < nodeCount; ++i)
{
CTuint cc = m_dthAsyncNodesContent[i];
CTuint length = 2 * cc;
#ifdef _DEBUG
if(cc <= MAX_ELEMENTS_PER_LEAF)
{
assert(0 && "cc <= MAX_ELEMENTS_PER_LEAF");
//start += length;
continue;
}
#endif
IndexedSAHSplit neutralSplit;
neutralSplit.index = 0;
neutralSplit.sah = FLT_MAX;
#ifdef USE_OPT_RED
//nutty::Reduce(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start + length, ReduceIndexedSplit(), neutralSplit, m_pStream);
ReduceOpt(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start, length, ReduceIndexedSplit(), neutralSplit, m_pStream);
#else
ReduceNoOpt(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start, length, ReduceIndexedSplit(), neutralSplit, m_pStream);
#endif
DEVICE_SYNC_CHECK();
#ifdef PRINT_OUT
IndexedSAHSplit s = *(m_splits_IndexedSplit.Begin() + start);
std::stringstream ss;
ss << m_nodesBBox[0][i];
ct_printf("%s ", ss.str().c_str());
ct_printf("id=%d, memoryadd=%d ", s.index, start);
CTreal plane = m_splits_Plane[s.index];
CTbyte axis = m_splits_Axis[s.index];
CTuint below = m_splits_Below[s.index];
CTuint above = m_splits_Above[s.index];
ct_printf("axis=%d plane=%f sah=%f below=%d above=%d\n", (CTuint)axis, plane, s.sah, below, above);
if(IS_INVALD_SAH(s.sah))
{
for(int i = start; i < start + length; ++i)
{
ct_printf("%d [%d %d] id=%d Axis=%d, Plane=%f SAH=%f :: ",
i, m_splits_Below[i], m_splits_Above[i],
m_splits_IndexedSplit[i].index,
(CTuint)m_splits_Axis[i],
m_splits_Plane[i],
(m_splits_IndexedSplit[i].sah == INVALID_SAH ? -1 : m_splits_IndexedSplit[i].sah));
BBox bbox;// = m_nodesBBox[0][ m_events3[0].nodeIndex[m_events3[0].toggleIndex][i] ];
ct_printf("%f %f %f | %f %f %f\n", bbox.m_min.x, bbox.m_min.y, bbox.m_min.z, bbox.m_max.x, bbox.m_max.y, bbox.m_max.z);
}
__debugbreak();
}
#endif
start += length;
}
}
else
{
const CTuint blockSize = 512U;
CTuint N = nodeCount * blockSize;
CTuint reduceGrid = nodeCount;//nutty::cuda::GetCudaGrid(N, blockSize);
//cudaErrorBuffer errorBuffer;
cudaSegReduce<blockSize>(m_splits_IndexedSplit.GetPointer(), N, eventCount, reduceGrid, blockSize, m_pStream);
//segReduce<blockSize><<<reduceGrid, blockSize, 0, m_pStream>>>(m_splits_IndexedSplit.GetPointer(), N, eventCount);
DEVICE_SYNC_CHECK();
}
// for(CTuint i = 0; i < min(m_pool.GetStreamCount(), nodeCount); ++i)
// {
// nutty::cuStream& stream = m_pool.GetStream(i);
// nutty::cuEvent e = stream.RecordEvent();
// hipStreamWaitEvent(0, e.GetPointer(), 0);
// }
//
// nutty::SetDefaultStream();
}
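// Counts the leaves in [nodeOffset, nodeOffset + nodeRange): the per-node isLeaf flags are
// exclusive-scanned, the scan buffers for this level's interior/leaf bookkeeping are resized,
// and the final leaf count is assembled on the host from the last prefix-sum element plus the
// last flag (both copied back asynchronously).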
CTuint cuKDTreeScan::CheckRangeForLeavesAndPrepareBuffer(nutty::DeviceBuffer<CTnodeIsLeaf_t>::iterator& isLeafBegin, CTuint nodeOffset, CTuint nodeRange)
{
m_leafCountScanner.Resize(nodeRange);
m_leafCountScanner.ExcBinaryScan(isLeafBegin + nodeOffset, isLeafBegin + nodeOffset + nodeRange, IsLeafOP<CTnodeIsLeaf_t>(), m_pStream);
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.WaitForStream(m_stream);
#ifndef USE_STREAM
hipDeviceSynchronize();
#endif
m_dthAsyncIntCopy.StartCopy(m_leafCountScanner.GetPrefixSum().GetConstPointer() + nodeRange - 1, 0);
m_dthAsyncByteCopy.StartCopy(isLeafBegin() + nodeOffset + nodeRange - 1, 0);
CTuint block = nutty::cuda::GetCudaBlock(nodeRange);
CTuint grid = nutty::cuda::GetCudaGrid(nodeRange, block);
DEVICE_SYNC_CHECK();
if(m_interiorCountScanned.Size() <= nodeRange)
{
m_interiorCountScanned.Resize(nodeRange);
m_maskedInteriorContent.Resize(nodeRange);
m_interiorContentScanner.Resize(nodeRange);
m_leafContentScanned.Resize(nodeRange);
}
//
// KERNEL_PROFILE(cudaCreateInteriorContentCountMasks(
// isLeafBegin() + nodeOffset,
// m_nodes_ContentCount.Begin()(),
// m_maskedInteriorContent.Begin()(), nodeRange, grid, block, m_pStream), CreateInteriorContentCountMasks);
DEVICE_SYNC_CHECK();
InteriorMaskOp op;
nutty::PrefixSumOp<CTuint> _op;
m_interiorContentScanner.ExcScanOPI(m_nodes_ContentCount.Begin(), m_nodes_ContentCount.Begin() + nodeRange, op, m_pStream);
// __ct_printf("%d\n", m);
DEVICE_SYNC_CHECK();
// not needed...
// makeOthers<<<grid, block, 0, m_pStream>>>(
//
// m_nodes_ContentStartAdd.Begin()(),
// m_interiorContentScanner.GetPrefixSum().Begin()(),
// m_leafContentScanned.Begin()(),
//
// m_leafCountScanner.GetPrefixSum().Begin()(),
// m_interiorCountScanned.Begin()(),
//
// nodeRange);
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForCopy();
m_dthAsyncByteCopy.WaitForCopy();
CTuint leafCount = m_dthAsyncIntCopy[0] + (m_dthAsyncByteCopy[0] == 1);
DEVICE_SYNC_CHECK();
return leafCount;
}
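// Turns the flagged nodes of the current level into leaves: leaf events are compacted into the
// leaf content buffers, the surviving interior nodes are compacted to the front of the
// per-level arrays, and the event lines are toggled. Returns the number of new leaves and the
// primitive counts that remain interior respectively were moved into leaves.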
MakeLeavesResult cuKDTreeScan::MakeLeaves(
nutty::DeviceBuffer<CTnodeIsLeaf_t>::iterator& isLeafBegin,
CTuint g_nodeOffset,
CTuint nodeOffset,
CTuint nodeCount,
CTuint eventCount,
CTuint currentLeafCount,
CTuint leafContentOffset,
CTuint initNodeToLeafIndex,
CTbyte gotLeaves)
{
CTuint leafCount = 0;
if(gotLeaves)
{
leafCount = CheckRangeForLeavesAndPrepareBuffer(isLeafBegin, nodeOffset, nodeCount);
DEVICE_SYNC_CHECK();
}
if(!leafCount)
{
MakeLeavesResult result;
result.leafCount = 0;
result.interiorPrimitiveCount = eventCount/2;
result.leafPrimitiveCount = 0;
return result;
}
m_dthAsyncIntCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.WaitForStream(m_stream);
#ifndef USE_STREAM
hipDeviceSynchronize();
#endif
//m_dthAsyncIntCopy.StartCopy(m_leafContentScanned.GetConstPointer() + nodeCount - 1, 0);
m_dthAsyncIntCopy.StartCopy(m_interiorContentScanner.GetPrefixSum().GetConstPointer() + nodeCount - 1, 0);
m_dthAsyncIntCopy.StartCopy(m_nodes_ContentCount.GetConstPointer() + nodeCount - 1, 1);
m_dthAsyncByteCopy.StartCopy(m_activeNodesIsLeaf.GetConstPointer() + nodeCount + nodeOffset - 1, 0);
m_leafNodesContentStart.Resize(currentLeafCount + leafCount);
m_leafNodesContentCount.Resize(currentLeafCount + leafCount);
const CTuint eventBlock = EVENT_GROUP_SIZE;
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
CTuint nodeBlock = nutty::cuda::GetCudaBlock(nodeCount);
CTuint nodeGrid = nutty::cuda::GetCudaGrid(nodeCount, nodeBlock);
#if 1
// m_eventIsLeafScanner.Resize(eventCount);
// m_eventIsLeafScanner.ExcScan(m_eventIsLeaf.Begin(), m_eventIsLeaf.Begin() + eventCount, TypeOp<CTbyte>());
m_eventIsLeafScanned.Resize(eventCount);
m_eventIsLeafScannedSums.Resize(eventCount/256 + 256);
// binaryGroupScan<256><<<eventGrid, eventBlock, 0, m_pStream>>>(
// m_eventIsLeaf.GetConstPointer(), m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(), TypeOp<CTeventIsLeaf_t>(), eventCount);
#ifndef USE_OPT_SCAN
cudaBinaryGroupScan<256>(m_eventIsLeaf.GetConstPointer(),
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(),
TypeOp<CTeventIsLeaf_t>(), eventCount, eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(eventCount, EVENT_GROUP_SIZE);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
//completeScan<256><<<1, 256, 0, m_pStream>>>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount);
cudaCompleteScan<256>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount, m_pStream);
DEVICE_SYNC_CHECK();
cudaSpreadScannedSumsSingle(
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetConstPointer(), eventCount, eventGrid-1, eventBlock, m_pStream);
}
#else
cudaBinaryGroupScan<256>(m_eventIsLeaf.GetConstPointer(),
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(),
TypeOp<CTeventIsLeaf_t>(), eventCount, eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(eventCount, EVENT_GROUP_SIZE);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
//completeScan<256><<<1, 256, 0, m_pStream>>>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount);
cudaCompleteScan<256>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount, m_pStream);
DEVICE_SYNC_CHECK();
cudaSpreadScannedSumsSingle(
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetConstPointer(), eventCount, eventGrid-1, eventBlock, m_pStream);
}
#endif
#endif
DEVICE_SYNC_CHECK();
if(m_leafNodesContent.Size() < leafContentOffset + eventCount/2)
{
m_leafNodesContent.Resize(leafContentOffset + eventCount/2);
}
DEVICE_SYNC_CHECK();
cudaCompactMakeLeavesData(
isLeafBegin() + nodeOffset,
m_nodes_ContentStartAdd.GetPointer(),
m_eventIsLeafScanned.GetConstPointer(),
m_nodes_ContentCount.GetPointer(),
m_eventIsLeaf.GetPointer(),
m_leafCountScanner.GetPrefixSum().GetConstPointer(),
m_activeNodes.GetPointer(),
m_leafCountScanner.GetPrefixSum().GetConstPointer(),
m_interiorContentScanner.GetPrefixSum().GetConstPointer(),
m_nodesBBox[1].GetPointer(),
m_leafNodesContent.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer(),
m_newNodesContentCount.GetPointer(),
m_newNodesContentStartAdd.GetPointer(),
m_leafNodesContentStart.GetPointer(),
m_leafNodesContentCount.GetPointer(),
m_newActiveNodes.GetPointer(),
m_nodesBBox[0].GetPointer(),
g_nodeOffset,
leafContentOffset,
currentLeafCount,
nodeCount,
m_eventLines.toggleIndex,
eventCount,
eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
m_eventLines.Toggle();
m_dthAsyncIntCopy.WaitForCopy();
m_dthAsyncByteCopy.WaitForCopy();
CTuint copyDistance = nodeCount - leafCount;
if(copyDistance)
{
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_nodes_ContentCount.GetPointer(), m_newNodesContentCount.GetPointer(), copyDistance * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_nodes_ContentStartAdd.GetPointer(), m_newNodesContentStartAdd.GetPointer(), copyDistance * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_activeNodes.GetPointer(), m_newActiveNodes.GetPointer(), copyDistance * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
}
CTuint interiorPrimCount = m_dthAsyncIntCopy[0] + (m_dthAsyncByteCopy[0] == 0) * m_dthAsyncIntCopy[1];
//if((int)interiorPrimCount < 0)
{
ct_printf("interiorPrimCount = %d %d %d %d\n", interiorPrimCount, m_dthAsyncIntCopy[0], (m_dthAsyncByteCopy[0] == 0), m_dthAsyncIntCopy[1]);
// __debugbreak();
}
CTuint leafPrimCount = eventCount/2 - interiorPrimCount;
leafPrimCount = leafPrimCount > eventCount/2 ? 0 : leafPrimCount;
MakeLeavesResult result;
result.leafCount = leafCount;
result.interiorPrimitiveCount = interiorPrimCount;
result.leafPrimitiveCount = leafPrimCount;
DEVICE_SYNC_CHECK();
return result;
}
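// Grow-only resize of the per-axis clip-mask buffers (with ~20% slack to avoid frequent
// reallocations). A resize may relocate the device memory, so the raw pointers are re-uploaded
// to the constant-memory symbols g_clipArray and cms afterwards.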
void ClipMask::Resize(size_t size, hipStream_t pStream)
{
if(mask[0].Size() >= size) return;
size = (CTuint)(1.2 * size);
//mask3.Resize(size);
mask3Scanner.Resize(size);
for(int i = 0; i < 3; ++i)
{
scannedMask[i].Resize(size);
scannedSums[i].Resize(size);
mask[i].Resize(size);
newSplits[i].Resize(size);
index[i].Resize(size);
elemsPerTile[i].Resize(size);
scannedOverlappingMasks[i].Resize(size);
scannedOverlappingSums[i].Resize(size);
// maskScanner[i].Resize(size);
}
cuClipMaskArray mm;
GetPtr(mm);
hipMemcpyToSymbolAsync(g_clipArray, &mm, sizeof(cuClipMaskArray), 0, hipMemcpyHostToDevice, pStream);
cuConstClipMask cmss[3];
GetConstPtr(cmss[0], 0);
GetConstPtr(cmss[1], 1);
GetConstPtr(cmss[2], 2);
hipMemcpyToSymbolAsync(cms, &cmss, 3 * sizeof(cuConstClipMask), 0, hipMemcpyHostToDevice, pStream);
}
void EventLine::ScanEvents(CTuint length)
{
__ct_printf("fatal error: ScanEvents not working\n");
exit(-1);
//eventScanner.ExcScan(mask.Begin(), mask.Begin() + length, nutty::PrefixSumOp<CTbyte>());
}
struct ClipMaskPrefixSum3OP
{
__device__ CTuint3 operator()(CTbyte3 elem)
{
CTuint3 v;
v.x = isSet(elem.x) ? 1 : 0;
v.y = isSet(elem.y) ? 1 : 0;
v.z = isSet(elem.z) ? 1 : 0;
return v;
}
__device__ __host__ CTbyte3 GetNeutral(void)
{
CTbyte3 v = {0};
return v;
}
};
void ClipMask::ScanMasks(CTuint length)
{
// for(CTbyte i = 0; i < 3; ++i)
// {
// maskScanner[i].ExcScan(mask[i].Begin(), mask[i].Begin() + length, ClipMaskPrefixSumOP());
// }
//mask3Scanner.ExcScan(mask3.Begin(), mask3.End(), ClipMaskPrefixSum3OP());
}
void EventLine::CompactClippedEvents(CTuint length)
{
// PREPARE_KERNEL(length)
// compactEventLine<<<grid, block>>>(GetDst(), GetSrc(), mask.Begin()(), eventScanner.GetPrefixSum().Begin()(), length);
// }
}
void EventLine::ScanEventTypes(CTuint eventCount)
{
EventStartScanOp<CTbyte> op0;
CTbyte add = toggleIndex;
typeStartScanner.ExcScan(type.Begin(add), type.Begin(add) + eventCount, op0);
}
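// Uploads the double-buffered event-line pointers of all three axes into constant memory:
// g_eventTriples[0] receives the source buffers (buffer index 0), g_eventTriples[1] the
// destination buffers (buffer index 1).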
void EventLines::BindToConstantMemory(hipStream_t pStream)
{
cuEventLineTriple src;//(eventLines, 0);
src.lines[0] = eventLines[0].GetPtr(0);
src.lines[1] = eventLines[1].GetPtr(0);
src.lines[2] = eventLines[2].GetPtr(0);
cuEventLineTriple dst;//(eventLines, 1);
dst.lines[0] = eventLines[0].GetPtr(1);
dst.lines[1] = eventLines[1].GetPtr(1);
dst.lines[2] = eventLines[2].GetPtr(1);
// hipMemcpyToSymbol(g_eventTriples, &src, sizeof(cuEventLineTriple));
// hipMemcpyToSymbol(g_eventTriples, &dst, sizeof(cuEventLineTriple), sizeof(cuEventLineTriple));
hipMemcpyToSymbolAsync(g_eventTriples, &src, sizeof(cuEventLineTriple), 0, hipMemcpyHostToDevice, pStream);
hipMemcpyToSymbolAsync(g_eventTriples, &dst, sizeof(cuEventLineTriple), sizeof(cuEventLineTriple), hipMemcpyHostToDevice, pStream);
// cuConstEventLineTriple constSrc;//(eventLines, 0);
// src.lines[0] = eventLines[0].GetPtr(0);
// src.lines[1] = eventLines[1].GetPtr(0);
// src.lines[2] = eventLines[2].GetPtr(0);
//
// hipMemcpyToSymbolAsync(g_eventSrcTriples, &constSrc, sizeof(cuConstEventLineTriple), 0, hipMemcpyHostToDevice);
// hipMemcpyToSymbolAsync(g_eventDstTriples, &dst, sizeof(cuEventLineTriple), 0, hipMemcpyHostToDevice);
}
// void EventLines::BindToggleIndexToConstantMemory(void)
// {
// CTbyte dst = ((toggleIndex+1)%2);
// hipMemcpyToSymbol(g_eventSrcIndex, &toggleIndex, sizeof(CTbyte));
// hipMemcpyToSymbol(g_eventDstIndex, &dst, sizeof(CTbyte));
// }
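// Scans the three per-axis clip masks in one fused pass. Without FAST_COMPACTION the binary
// masks are scanned directly; with FAST_COMPACTION the valid elements per tile are counted
// first and those per-tile counts are scanned instead.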
void cuKDTreeScan::ScanClipMaskTriples(CTuint eventCount)
{
#ifdef FAST_COMPACTION
ConstTuple<3, CTuint> ptr;
const CTuint blockSize = FAST_COMP_LANE_SIZE;
CTuint tilesPerProc = nutty::cuda::GetCudaGrid(eventCount, FAST_COMP_PROCS * FAST_COMP_LANE_SIZE);
CTuint threads = FAST_COMP_PROCS * FAST_COMP_LANE_SIZE;
CTuint grid = nutty::cuda::GetCudaGrid(threads, blockSize);
hipLaunchKernelGGL(( countValidElementsPerTile2<blockSize>), dim3(grid), dim3(blockSize), 0, m_pStream, tilesPerProc, eventCount);
// PrintBuffer(m_clipsMask.mask[0], 2 * blockSize);
// PrintBuffer(m_clipsMask.elemsPerTile[0], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.elemsPerTile[1], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.elemsPerTile[2], FAST_COMP_PROCS);
ptr.ts[0] = (const CTuint*)m_clipsMask.elemsPerTile[0].GetConstPointer();
ptr.ts[1] = (const CTuint*)m_clipsMask.elemsPerTile[1].GetConstPointer();
ptr.ts[2] = (const CTuint*)m_clipsMask.elemsPerTile[2].GetConstPointer();
#else
ConstTuple<3, CTclipMask_t> ptr;
ptr.ts[0] = m_clipsMask.mask[0].GetConstPointer();
ptr.ts[1] = m_clipsMask.mask[1].GetConstPointer();
ptr.ts[2] = m_clipsMask.mask[2].GetConstPointer();
#endif
Tuple<3, CTuint> ptr1;
ptr1.ts[0] = m_clipsMask.scannedMask[0].GetPointer();
ptr1.ts[1] = m_clipsMask.scannedMask[1].GetPointer();
ptr1.ts[2] = m_clipsMask.scannedMask[2].GetPointer();
Tuple<3, CTuint> sums;
sums.ts[0] = m_clipsMask.scannedSums[0].GetPointer();
sums.ts[1] = m_clipsMask.scannedSums[1].GetPointer();
sums.ts[2] = m_clipsMask.scannedSums[2].GetPointer();
#ifndef FAST_COMPACTION
ClipMaskPrefixSumOP op;
ScanBinaryTriples(ptr, ptr1, sums, eventCount, op, m_pStream);
// ClipMaskIsOverlappingOP __op;
// ptr1.ts[0] = m_clipsMask.scannedOverlappingMasks[0].GetPointer();
// ptr1.ts[1] = m_clipsMask.scannedOverlappingMasks[1].GetPointer();
// ptr1.ts[2] = m_clipsMask.scannedOverlappingMasks[2].GetPointer();
// sums.ts[0] = m_clipsMask.scannedOverlappingSums[0].GetPointer();
// sums.ts[1] = m_clipsMask.scannedOverlappingSums[1].GetPointer();
// sums.ts[2] = m_clipsMask.scannedOverlappingSums[2].GetPointer();
// ScanTriples(ptr, ptr1, sums, eventCount, __op, m_pStream);
// //PrintBuffer(m_clipsMask.scannedOverlappingMasks[0], eventCount);
// __ct_printf("scannedOverlappingMasks x: %d \n", m_clipsMask.scannedOverlappingMasks[0][eventCount-1]);
// __ct_printf("scannedOverlappingMasks y: %d \n", m_clipsMask.scannedOverlappingMasks[1][eventCount-1]);
// __ct_printf("scannedOverlappingMasks z: %d \n\n", m_clipsMask.scannedOverlappingMasks[2][eventCount-1]);
#else
nutty::PrefixSumOp<CTuint> op;
ScanTriples(ptr, ptr1, sums, FAST_COMP_PROCS, op, m_pStream);
// hipDeviceSynchronize();
/*
for(int i = 0; i < 3; ++i)
{
int cpuSum = 0;
for(int p = 0; p < eventCount; ++p)
{
cpuSum += m_clipsMask.mask[i][p] > 0;
}
int sum = 0;
for(int g = 0; g < warps; ++g)
{
sum += m_clipsMask.elemsPerTile[i][g];
}
ct_printf("cpuSum=%d sum=%d, %d + %d = %d\n",
cpuSum, sum, m_clipsMask.scannedMask[i][warps-1], m_clipsMask.elemsPerTile[i][warps-1], m_clipsMask.scannedMask[i][warps-1] + m_clipsMask.elemsPerTile[i][warps-1]);
}
PrintBuffer(m_clipsMask.elemsPerTile[0], warps);
PrintBuffer(m_clipsMask.elemsPerTile[1], warps);
PrintBuffer(m_clipsMask.elemsPerTile[2], warps);
*/
// PrintBuffer(m_clipsMask.scannedMask[0], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.scannedMask[1], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.scannedMask[2], FAST_COMP_PROCS);
#endif
//m_clipsMask.maskScanner[0].ExcScan(m_clipsMask.mask[0].Begin(), m_clipsMask.mask[0].Begin() + eventCount, op, m_pStream);
// m_clipsMask.maskScanner[1].ExcScan(m_clipsMask.mask[1].Begin(), m_clipsMask.mask[1].Begin() + eventCount, op, m_pStream);
// m_clipsMask.maskScanner[2].ExcScan(m_clipsMask.mask[2].Begin(), m_clipsMask.mask[2].Begin() + eventCount, op, m_pStream);
}
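// Main breadth-first build: creates and sorts the initial events from the triangle AABBs, then
// per level computes the SAH splits, clips and partitions the events, creates the child nodes
// and extracts leaves, until no interior nodes or events remain or the maximum depth is reached.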
CT_RESULT cuKDTreeScan::Update(void)
{
float* rawEvents[3];
unsigned int* rawEventkeys[3];
CTuint primitiveCount = (CTuint)(m_currentTransformedVertices.Size() / 3);
if(!m_initialized)
{
InitBuffer();
m_initialized = true;
//CUDA_RT_SAFE_CALLING_NO_SYNC(hipDeviceSetLimit(hipLimitMallocHeapSize, 1024 * 1024 * 1024));
}
for(CTbyte i = 0; i < 3; ++i)
{
m_eventLines.eventLines[i].rawEvents->resize(2 * primitiveCount);
m_eventLines.eventLines[i].eventKeys->resize(2 * primitiveCount);
rawEventkeys[i] = m_eventLines.eventLines[i].eventKeys->data().get();
rawEvents[i] = m_eventLines.eventLines[i].rawEvents->data().get();
}
//ClearBuffer();
// static bool staticc = true;
KERNEL_PROFILE(cudaCreateTriangleAABBs(m_currentTransformedVertices.GetPointer(), m_primAABBs.GetPointer(), primitiveCount, m_pStream), init);
//PrintBuffer(m_primAABBs);
// if(staticc)
{
DEVICE_SYNC_CHECK();
static float3 max3f = {FLT_MAX, FLT_MAX, FLT_MAX};
static float3 min3f = -max3f;
BBox bboxN;
bboxN.m_min = max3f;
bboxN.m_max = min3f;
m_sceneBBox.Resize(m_primAABBs.Size()/2);
nutty::Reduce(m_sceneBBox.Begin(), m_primAABBs.Begin(), m_primAABBs.End(), ReduceBBox(), bboxN, m_pStream);
//staticc = false;
}
DEVICE_SYNC_CHECK();
CTuint elementBlock = nutty::cuda::GetCudaBlock(primitiveCount);
CTuint elementGrid = nutty::cuda::GetCudaGrid(primitiveCount, elementBlock);
m_eventLines.Resize(2 * primitiveCount, m_pStream);
#ifdef PROFILE
chimera::util::HTimer g_timer;
hipDeviceSynchronize();
g_timer.Start();
g_time = 0;
#endif
m_eventLines.toggleIndex = 0;
KERNEL_PROFILE(
cudaCreateEventsAndInit3(
m_primAABBs.GetConstPointer(),
m_sceneBBox.GetConstPointer(),
m_activeNodes.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer(),
m_nodes_IsLeaf.GetPointer(),
m_nodes_ContentCount.GetPointer(),
m_nodesBBox[0].GetPointer(),
rawEvents,
rawEventkeys,
primitiveCount, elementGrid, elementBlock, m_pStream)
,init
);
DEVICE_SYNC_CHECK();
KERNEL_PROFILE(SortEvents(&m_eventLines), init);
DEVICE_SYNC_CHECK();
// for(CTbyte i = 0; i < 3; ++i)
// {
// nutty::Sort(
// nutty::DevicePtr_Cast<IndexedEvent>(m_eventLines.eventLines[i].GetPtr(0).indexedEvent),
// nutty::DevicePtr_Cast<IndexedEvent>(m_eventLines.eventLines[i].GetPtr(0).indexedEvent + 2 * primitiveCount),
// EventSort(),
// m_pStream);
// }
// thrust::host_vector<float> h_vec(primitiveCount *2);
// thrust::host_vector<unsigned int> h_vecK(primitiveCount *2);
// h_vec = *m_eventLines.eventLines[2].rawEvents;
// h_vecK = *m_eventLines.eventLines[2].eventKeys;
// for(int i = 0; i < primitiveCount *2; ++i)
// {
// __ct_printf("%f %d -- ", h_vec[i], h_vecK[i]);
// }
// __ct_printf("\n\n");
// PrintBuffer(m_eventLines.eventLines[2].indexedEvent[0]);
//reorderEvent3<<<2 * elementGrid, elementBlock, 0, m_pStream>>>(2 * primitiveCount);
KERNEL_PROFILE(cudaReorderEvent3(2 * primitiveCount, 2 * elementGrid, elementBlock, rawEvents, rawEventkeys, m_pStream), init);
DEVICE_SYNC_CHECK();
CTuint g_interiorNodesCountOnThisLevel = 1;
CTuint g_currentInteriorNodesCount = 1;
CTuint g_currentLeafCount = 0;
CTuint g_leafContentOffset = 0;
CTuint g_childNodeOffset = 1;
CTuint g_nodeOffset = 0;
CTuint g_entries = 1;
CTuint eventCount = 2 * primitiveCount;
//CTuint hybridChangeDepth = (3 * m_depth) / 4; //log((2*eventCount)/3);
m_eventLines.Toggle();
CTuint eventSum = 0;
for(CTbyte d = 0; d < m_depth; ++d)
{
// static int i = 0;
//__ct_printf("New Level=%d Events=%d (Frame=%d)\n", d, eventCount, ++i);
const static CTbyte null = 0;
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_gotLeaves.GetPointer(), &null, sizeof(CTbyte), hipMemcpyHostToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyToSymbolAsync(d_nodeOffset, &g_interiorNodesCountOnThisLevel, sizeof(CTuint), 0, hipMemcpyHostToDevice, m_pStream));
CTuint nodeCount = g_interiorNodesCountOnThisLevel;
CTuint nodeBlock = nutty::cuda::GetCudaBlock(nodeCount);
CTuint nodeGrid = nutty::cuda::GetCudaGrid(nodeCount, nodeBlock);
CTuint eventBlock = EVENT_GROUP_SIZE;//nutty::cuda::GetCudaBlock(eventCount, 256U);
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
DEVICE_SYNC_CHECK();
#ifndef USE_HYBRID_REDUCTION
m_dthAsyncNodesContent.WaitForStream(m_stream);
m_dthAsyncNodesContent.Resize(nodeCount);
m_dthAsyncNodesContent.StartCopy(m_nodes_ContentCount.GetConstPointer(), 0, nodeCount);
#endif
#if 0
m_hNodesContentCount.Resize(nodeCount);
nutty::Copy(m_hNodesContentCount.Begin(), m_nodes_ContentCount.Begin(), nodeCount);
// PrintBuffer(m_hNodesContentCount, nodeCount);
// for(int i = 0; i < nodeCount; ++i)
// {
// if(m_hNodesContentCount[i] > 500000 || m_hNodesContentCount[i] <= MAX_ELEMENTS_PER_LEAF)
// {
// exit(0);
// }
// }
//PrintBuffer(m_nodes_ContentCount, nodeCount);
PRINT_BUFFER_N(m_nodes_ContentCount, nodeCount);
#endif
//m_pool.ClearEvents();
KERNEL_PROFILE(
ComputeSAH_Splits(
nodeCount,
eventCount,
m_nodes_ContentCount.Begin()()), ComputeSAH_Splits);
DEVICE_SYNC_CHECK();
#if 0
// not strictly needed right now...
hipLaunchKernelGGL(( makeLeafIfBadSplitOrLessThanMaxElements), dim3(nodeGrid), dim3(nodeBlock), 0, m_pStream,
m_nodes,
m_nodes_IsLeaf.GetPointer() + g_nodeOffset,
m_activeNodes.GetPointer(),
m_activeNodesIsLeaf.GetPointer(),
m_splits,
d == m_depth-1,
nodeCount);
DEVICE_SYNC_CHECK();
#endif
m_newNodesContentCount.Resize(m_nodes_ContentCount.Size());
m_newNodesContentStartAdd.Resize(m_nodes_ContentCount.Size());
m_lastNodeContentStartAdd.Resize(m_newNodesContentStartAdd.Size());
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_lastNodeContentStartAdd.GetPointer(), m_nodes_ContentStartAdd.GetPointer(), nodeCount * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
MakeLeavesResult leavesRes; // = MakeLeaves(m_activeNodesIsLeaf.Begin(), g_nodeOffset, 0, nodeCount, eventCount, g_currentLeafCount, g_leafContentOffset, 0);
leavesRes.leafCount = 0;
leavesRes.interiorPrimitiveCount = eventCount/2;
leavesRes.leafPrimitiveCount = 0;
CTuint lastLeaves = leavesRes.leafCount;
primitiveCount = leavesRes.interiorPrimitiveCount;
if(leavesRes.leafCount) //assert(!leavesRes.leafCount && "currently not working");
{
OutputDebugStringA("leavesRes.leafCount currently not working\n");
exit(0);
}
DEVICE_SYNC_CHECK();
CTuint count = eventCount;
if(leavesRes.interiorPrimitiveCount)
{
CTuint block = EVENT_GROUP_SIZE;
CTuint grid = nutty::cuda::GetCudaGrid(count, block);
m_eventLines.Resize(2 * count, m_pStream);
m_clipsMask.Resize(2 * count, m_pStream);
// nutty::ZeroMem(m_clipsMask.mask[0]);
// nutty::ZeroMem(m_clipsMask.mask[1]);
// nutty::ZeroMem(m_clipsMask.mask[2]);
// nutty::ZeroMem(m_clipsMask.maskScanner[0].GetPrefixSum());
// nutty::ZeroMem(m_clipsMask.maskScanner[1].GetPrefixSum());
// nutty::ZeroMem(m_clipsMask.maskScanner[2].GetPrefixSum());
// CTuint tb = 32;
// CTuint tg = nutty::cuda::GetCudaGrid(count, tb);
KERNEL_PROFILE(cudaCreateClipMask(
m_nodes_ContentStartAdd.GetPointer(),
m_nodes_ContentCount.GetPointer(),
count,
m_eventLines.toggleIndex, grid, block, m_pStream), ClippingAndPartitioning);
#ifndef USE_CLIP_MASK
KERNEL_PROFILE(cudaClipEventsMask(m_nodes_ContentStartAdd.GetPointer(),
m_nodes_ContentCount.GetPointer(),
count,
m_eventLines.toggleIndex, grid, block, m_pStream), ClippingAndPartitioning);
#endif
// std::ofstream file("mask.txt", std::ios::app);
//
// for(int axis = 0; axis < 3; ++axis)
// {
// nutty::HostBuffer<CTuint> tmp(2 * count);
// nutty::Copy(tmp.Begin(), m_clipsMask.mask[axis].Begin(), m_clipsMask.mask[axis].Begin() + 2 * count);
// for(int i = 0; i < 2 * count; ++i)
// {
// file << (int)tmp[i] << " ";
// }
// file << "NA ";
// }
//file << "NL\n";
//
// createClipMask<<<grid, block, 0, m_pStream>>>(
// m_nodes_ContentStartAdd.GetPointer(),
// m_nodes_ContentCount.GetPointer(),
// count,
// m_eventLines.toggleIndex);
// clipEvents3<<<grid, block, 0, m_pStream>>>(
// m_nodes_ContentStartAdd.GetPointer(),
// m_nodes_ContentCount.GetPointer(),
// count,
// m_eventLines.toggleIndex);
// CTuint toggleSave = m_eventLines.toggleIndex;
// CTuint prefixSums[3];
// for(int k = 0; k < 3; ++k)
// {
// nutty::HostBuffer<CTuint> srcEventScan(2 * count);
// nutty::Copy(srcEventScan.Begin(), m_clipsMask.mask[k].Begin(), m_clipsMask.mask[k].Begin() + 2 * count);
// prefixSums[k] = 0;
// for(int i = 0; i < srcEventScan.Size(); ++i)
// {
// prefixSums[k] += srcEventScan[i] > 0;
// }
// }
DEVICE_SYNC_CHECK();
//m_clipsMask.ScanMasks(2 * count);
KERNEL_PROFILE(ScanClipMaskTriples(2 * count), ClippingAndPartitioning);
//m_clipsMask.mask3Scanner.ExcScan(m_clipsMask.mask3.Begin(), m_clipsMask.mask3.Begin() + 2 * count, ClipMaskPrefixSum3OP());
DEVICE_SYNC_CHECK();
#ifndef USE_STREAM
hipDeviceSynchronize();
#endif
m_dthAsyncIntCopy.WaitForStream(m_stream);
#ifndef FAST_COMPACTION
//m_dthAsyncByteCopy.WaitForStream(m_stream);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.scannedMask[0].GetConstPointer() + 2 * count - 1), 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.mask[0].GetPointer() + 2 * count - 1), 0);
#else
const CTuint blockSize = FAST_COMP_LANE_SIZE;
CTuint threads = FAST_COMP_PROCS * FAST_COMP_LANE_SIZE;
CTuint tilesPerProc = nutty::cuda::GetCudaGrid(2 * count, threads);
CTuint activeWarps = nutty::cuda::GetCudaGrid(2 * count, tilesPerProc * FAST_COMP_LANE_SIZE);
CTuint _grid = nutty::cuda::GetCudaGrid(threads, blockSize);
//int tiles = 2*eventCount / TILE_SIZE + (2*eventCount % TILE_SIZE == 0 ? 0 : 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.scannedMask[0].GetConstPointer() + min(activeWarps, FAST_COMP_PROCS) - 1), 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.elemsPerTile[0].GetConstPointer() + min(activeWarps, FAST_COMP_PROCS) - 1), 0);
#endif
CTuint childCount = (nodeCount - leavesRes.leafCount) * 2;
CTuint thisLevelNodesLeft = nodeCount - leavesRes.leafCount;
#ifndef FAST_COMPACTION
CTuint _block = EVENT_GROUP_SIZE;
CTuint _grid = nutty::cuda::GetCudaGrid(3 * 2 * count, block);
// compactEventLineV2<<<_grid, _block, 0, m_pStream>>>(
// 2 * count,
// m_eventLines.toggleIndex);
// chimera::util::HTimer tt;
//
// hipDeviceSynchronize();
// tt.Start();
nodeBlock = nutty::cuda::GetCudaBlock(thisLevelNodesLeft);
nodeGrid = nutty::cuda::GetCudaGrid(thisLevelNodesLeft, nodeBlock);
// if(nodeCount > 1024)
// {
// KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, m_pStream), tb);
//
// }
// else
// {
// _grid = nutty::cuda::GetCudaGrid(3 * 2 * count, block);
// KERNEL_PROFILE(cudaCompactEventLineV2(3 * 2 * count, m_eventLines.toggleIndex, _grid, _block, m_pStream), ta);
// }
//if(nodeCount > 15)
{
#ifndef USE_HYBRID_COMPACTION
CTuint N = 3 * 2 * count;
_grid = nutty::cuda::GetCudaGrid(N, block);
KERNEL_PROFILE(cudaCompactEventLineV2(N, m_eventLines.toggleIndex, _grid, _block, m_pStream), ClippingAndPartitioning);
#else
if(d >= hybridChangeDepth)
{
__ct_printf("%d %d\n", d, hybridChangeDepth);
KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, m_pStream), ClippingAndPartitioning);
}
else
{
CTuint N = 3 * 2 * count;
_grid = nutty::cuda::GetCudaGrid(N, block);
KERNEL_PROFILE(cudaCompactEventLineV2(N, m_eventLines.toggleIndex, _grid, _block, m_pStream), ClippingAndPartitioning);
}
#endif
}
//else
{
// _grid = nutty::cuda::GetCudaGrid(2 * count, block);
// KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, _block, m_pStream), cudaCompactEventLineV21);
}
/* PrintBuffer(m_clipsMask.mask[0], 2 * count);*/
// Tuple<3, CTuint> ptr1;
// ptr1.ts[0] = m_clipsMask.scannedOverlappingMasks[0].GetPointer();
// ptr1.ts[1] = m_clipsMask.scannedOverlappingMasks[1].GetPointer();
// ptr1.ts[2] = m_clipsMask.scannedOverlappingMasks[2].GetPointer();
// CTuint b = EVENT_GROUP_SIZE;
//
// CTuint g = nutty::cuda::GetCudaGrid(count, block);
// insertClippedEventsSplitAxis<EVENT_GROUP_SIZE><<<g, b, 0, m_pStream>>>(ptr1, m_nodes_ContentStartAdd.GetConstPointer(), m_eventLines.toggleIndex, count);
DEVICE_SYNC_CHECK();
// hipDeviceSynchronize();
// tt.Stop();
/* __ct_printf("%f (%d)\n", tt.GetMillis(), d);*/
// nutty::cuEvent e = m_stream.RecordEvent();
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[0].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[1].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[2].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[3].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[4].GetPointer(), e.GetPointer(), 0));
// hipEventDestroy(e.Free());
//
// optimizedcompactEventLineV3Type0<<<_grid, _block, 0, m_compactStreams[0].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type1<<<_grid, _block, 0, m_compactStreams[1].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type2<<<_grid, _block, 0, m_compactStreams[2].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type3<<<_grid, _block, 0, m_compactStreams[3].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type4<<<_grid, _block, 0, m_compactStreams[4].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// DEVICE_SYNC_CHECK();
//
// m_stream.WaitEvent(m_compactStreams[0].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[1].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[2].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[3].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[4].RecordEvent());
#else
//optimizedcompactEventLineV2<blockSize/32><<<_grid, blockSize>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
//optimizedcompactEventLineV3<blockSize><<<_grid, blockSize>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// nutty::cuEvent e = m_stream.RecordEvent();
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[0].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[1].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[2].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[3].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(hipStreamWaitEvent(m_compactStreams[4].GetPointer(), e.GetPointer(), 0));
// hipEventDestroy(e.Free());
//
// optimizedcompactEventLineV3Type0<blockSize><<<_grid, blockSize, 0, m_compactStreams[0].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type1<blockSize><<<_grid, blockSize, 0, m_compactStreams[1].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type2<blockSize><<<_grid, blockSize, 0, m_compactStreams[2].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type3<blockSize><<<_grid, blockSize, 0, m_compactStreams[3].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type4<blockSize><<<_grid, blockSize, 0, m_compactStreams[4].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// DEVICE_SYNC_CHECK();
//
// m_stream.WaitEvent(m_compactStreams[0].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[1].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[2].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[3].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[4].RecordEvent());
#endif
m_eventLines.Toggle();
g_leafContentOffset += leavesRes.leafPrimitiveCount;
// if(lastLeaves)
// {
// // setActiveNodesMask<1><<<nodeGrid, nodeBlock, 0, m_pStream>>>(
// // m_activeNodesThisLevel.Begin()(),
// // m_activeNodesIsLeaf.Begin()(),
// // m_interiorCountScanned.Begin()(),
// // 0,
// // nodeCount);
// KERNEL_PROFILE(cudaSetActiveNodesMask(
// m_activeNodesThisLevel.Begin()(),
// m_activeNodesIsLeaf.Begin()(),
// m_interiorCountScanned.Begin()(),
// 0,
// nodeCount, nodeGrid, nodeBlock, m_pStream), cudaSetActiveNodesMask);
// }
//merge into compact events?
KERNEL_PROFILE(cudaInitInteriorNodes(
m_activeNodes.GetConstPointer(),
m_activeNodesThisLevel.GetConstPointer(),
m_nodesBBox[0].GetConstPointer(),
m_nodesBBox[1].GetPointer(),
m_nodes_ContentCount.GetPointer(),
m_newNodesContentCount.GetPointer(),
m_newActiveNodes.GetPointer(),
m_activeNodesIsLeaf.GetPointer() + nodeCount,
g_childNodeOffset,
g_nodeOffset,
thisLevelNodesLeft,
m_lastNodeContentStartAdd.GetPointer(),
m_gotLeaves.GetPointer(),
m_depth == d+1,
leavesRes.leafCount,
nodeGrid, nodeBlock, m_pStream), CreateChildNodes);
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_activeNodes.GetPointer(), m_newActiveNodes.GetPointer(), childCount * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(m_nodes_ContentCount.GetPointer(), m_newNodesContentCount.GetPointer(), childCount * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForCopy();
// #ifndef FAST_COMPACTION
// m_dthAsyncByteCopy.WaitForCopy();
// #endif
eventCount = m_dthAsyncIntCopy[1] +
#ifndef FAST_COMPACTION
isSet(m_dthAsyncIntCopy[0]);// - ccLeft; //m_clipsMask.scannedMask[0][2 * count - 1] + isSet(m_clipsMask.mask[0][2 * count - 1]);
#else
m_dthAsyncIntCopy[0];
#endif
//static uint totalEventCount = 0;
//totalEventCount += 2 * count;
//__ct_printf("%d - %f \n", totalEventCount, eventCount / (float)(2 * count));
//__ct_printf("eventCount=%d %d %d\n", eventCount, m_dthAsyncIntCopy[0], m_dthAsyncIntCopy[1]);
// if(nodeCount > 15)
// {
// for(int a = 0; a < 1; ++a)
// {
// CTuint index = m_eventLines.toggleIndex;
// // PrintBuffer(m_eventLines.eventLines[a].indexedEvent[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].primId[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].ranges[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].type[index], eventCount);
// PrintBuffer((*m_eventLines.eventLines[a].nodeIndex).Get(index), eventCount, "\n");
// __ct_printf("\n\n");
// }
// printf("hel");
// }
// if(eventCount == 0)
// {
// __ct_printf("cuKDTreeScan: FATAL ERROR eventCount %d \n", eventCount);
// exit(0);
// }
// PrintBuffer(m_eventLines.eventLines[0].nodeIndex->Get(m_eventLines.toggleIndex), eventCount);
// PrintBuffer(m_eventLines.eventLines[0].mask, 2 * count);
// PrintBuffer(m_clipsMask.scannedMask[0], 2 * count);
//exit(0);
m_dthAsyncByteCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.StartCopy(m_gotLeaves.GetConstPointer(), 0);
eventBlock = EVENT_GROUP_SIZE;
eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
//merge into compact events?
KERNEL_PROFILE(cudaSetEventsBelongToLeafAndSetNodeIndex(
m_activeNodesIsLeaf.GetPointer() + nodeCount,
m_eventIsLeaf.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer() + g_childNodeOffset,
eventCount,
2 * nodeCount,
m_eventLines.toggleIndex,
eventGrid, eventBlock, m_pStream), 0cudaSetEventsBelongToLeafAndSetNodeIndex);
DEVICE_SYNC_CHECK();
//PROFILE_END;
//if(!m_dthAsyncByteCopy[0])
{
m_interiorContentScanner.Resize(childCount);
KERNEL_PROFILE(m_interiorContentScanner.ExcScan(m_nodes_ContentCount.Begin(), m_nodes_ContentCount.Begin() + childCount, nutty::PrefixSumOp<CTuint>(), m_pStream), 0m_interiorContentScanner);
DEVICE_SYNC_CHECK();
CUDA_RT_SAFE_CALLING_SYNC(hipMemcpyAsync(
m_nodes_ContentStartAdd.GetPointer(), m_interiorContentScanner.GetPrefixSum().GetConstPointer(), childCount * sizeof(CTuint), hipMemcpyDeviceToDevice, m_pStream));
//nutty::Copy(m_nodes_ContentStartAdd.Begin(), m_interiorContentScanner.GetPrefixSum().Begin(), m_interiorContentScanner.GetPrefixSum().Begin() + childCount);
DEVICE_SYNC_CHECK();
}
m_dthAsyncByteCopy.WaitForCopy();
DEVICE_SYNC_CHECK();
KERNEL_PROFILE(leavesRes = MakeLeaves(
m_activeNodesIsLeaf.Begin(),
g_childNodeOffset,
nodeCount,
childCount,
eventCount,
g_currentLeafCount + lastLeaves,
g_leafContentOffset, 1,
m_dthAsyncByteCopy[0]), MakeLeaves);
DEVICE_SYNC_CHECK();
eventCount = 2 * leavesRes.interiorPrimitiveCount;
}
else
{
//todo
for(CTuint i = 0; i < nodeCount; ++i)
{
m_nodes_IsLeaf.Insert(g_nodeOffset + i, (CTbyte)1);
}
__ct_printf("errr not good...\n");
}
g_entries += 2 * nodeCount;
eventSum += eventCount;
g_interiorNodesCountOnThisLevel = 2 * (nodeCount - lastLeaves) - leavesRes.leafCount;
g_currentInteriorNodesCount += g_interiorNodesCountOnThisLevel;
g_nodeOffset = g_childNodeOffset;
g_childNodeOffset += 2 * (nodeCount);
//update globals
g_leafContentOffset += leavesRes.leafPrimitiveCount;
g_currentLeafCount += lastLeaves + leavesRes.leafCount;
ct_printf(
"g_nodeOffset=%d g_childNodeOffset=%d g_leafContentOffset=%d g_interiorNodesCountOnThisLevel=%d g_currentInteriorNodesCount=%d g_currentLeafCount=%d\nCreated '%d' Leaves, Interior Nodes '%d'\n",
g_nodeOffset, g_childNodeOffset, g_leafContentOffset, g_interiorNodesCountOnThisLevel, g_currentInteriorNodesCount, g_currentLeafCount, lastLeaves + leavesRes.leafCount, g_interiorNodesCountOnThisLevel);
DEVICE_SYNC_CHECK();
if(!leavesRes.leafCount)
{
hipMemcpyAsync(m_nodesBBox[0].GetPointer(), m_nodesBBox[1].GetConstPointer(), g_interiorNodesCountOnThisLevel * sizeof(BBox), hipMemcpyDeviceToDevice, m_pStream);
//nutty::Copy(m_nodesBBox[0].Begin(), m_nodesBBox[1].Begin(), m_nodesBBox[1].Begin() + g_interiorNodesCountOnThisLevel);
}
if(eventCount == 0 || g_interiorNodesCountOnThisLevel == 0) //all nodes are leaf nodes
{
//primitiveCount = lastCnt;
break;
}
if(d < m_depth-1) //are we not done?
{
//check if we need more memory
if(eventCount > m_splits_Above.Size())
{
__ct_printf("need memory...\n");
GrowSplitMemory(2 * eventCount);
}
if(m_activeNodes.Size() < g_interiorNodesCountOnThisLevel + 2 * g_interiorNodesCountOnThisLevel)
{
__ct_printf("need memory...\n");
GrowPerLevelNodeMemory(4 * 2 * g_interiorNodesCountOnThisLevel);
}
if(m_nodes_IsLeaf.Size() < (g_childNodeOffset + 2 * g_interiorNodesCountOnThisLevel))
{
__ct_printf("need memory...\n");
GrowNodeMemory();
}
}
}
#ifdef PROFILE
static int frame = 0;
hipDeviceSynchronize();
g_timer.Stop();
__ct_printf("Total: %f, Section: %f \n", g_timer.GetMillis(), g_time);
if(frame == PROFILE_FRAMES)
{
for(auto it = g_profileTable.begin(); it != g_profileTable.end(); ++it)
{
__ct_printf("%s\n", it->first.c_str());
}
for(auto it = g_profileTable.begin(); it != g_profileTable.end(); ++it)
{
__ct_printf("%f\n", it->second / (float)PROFILE_FRAMES);
}
g_profileTable.clear();
exit(0);
}
frame++;
#endif
m_interiorNodesCount = g_currentInteriorNodesCount;
m_leafNodesCount = g_currentLeafCount;
//CTuint allNodeCount = m_interiorNodesCount + m_leafNodesCount;
#ifdef _DEBUG
// CUDA_RT_SAFE_CALLING_NO_SYNC(hipStreamSynchronize(m_pStream));
__ct_printf("%d\n", eventSum);
ct_printf("Tree Summary:\n");
PRINT_BUFFER(m_nodes_IsLeaf);
PRINT_BUFFER(m_nodes_Split);
PRINT_BUFFER(m_nodes_SplitAxis);
PRINT_BUFFER(m_nodes_LeftChild);
PRINT_BUFFER(m_nodes_RightChild);
PRINT_BUFFER(m_leafNodesContentCount);
PRINT_BUFFER(m_leafNodesContentStart);
PRINT_BUFFER(m_nodes_NodeIdToLeafIndex);
if(m_leafNodesContent.Size() < 1024)
{
PRINT_BUFFER(m_leafNodesContent);
}
else
{
ct_printf("skipping content '%d' elements...\n", m_leafNodesContent.Size());
}
#endif
DEVICE_SYNC_CHECK();
#ifdef _DEBUG
ValidateTree();
#endif
return CT_SUCCESS;
}
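// Debug-only sanity check: breadth-first traversal of the finished tree; child indices are
// expected to be strictly greater than their parent's index.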
void cuKDTreeScan::ValidateTree(void)
{
std::queue<CTuint> queue;
queue.push(0);
while(!queue.empty())
{
CTuint node = queue.front();
queue.pop();
ct_printf("%d ", node);
if(!m_nodes_IsLeaf[node])
{
ct_printf("\n");
// out-of-bounds child indices would trigger assertions in the buffer accesses below
CTuint left = m_nodes_LeftChild[node];
CTuint right = m_nodes_RightChild[node];
if(left < node || right < node)
{
assert(0 && "fuck");
}
queue.push(left);
queue.push(right);
}
else
{
CTuint leafIndex = m_nodes_NodeIdToLeafIndex[node];
ct_printf(" - %d\n", leafIndex);
}
}
}
#else
CT_RESULT cuKDTreeScan::Update(void)
{
return CT_INVALID_OPERATION;
}
#endif | 073cc57b441f356dc57cc89e404321b785743ee8.cu | #ifdef _DEBUG
#define NUTTY_DEBUG
#else
#undef NUTTY_DEBUG
#endif
#undef NUTTY_DEBUG
// #include <thrust/sort.h>
// #include <thrust/detail/type_traits.h>
//disable / enable optimization
#undef PROFILE
#define USE_STREAM
#define USE_OPT_RED
#define USE_OPT_SCAN
#define USE_CLIP_MASK
#define USE_HYBRID_REDUCTION
#undef USE_HYBRID_COMPACTION
#if 0
#undef USE_STREAM
#undef USE_OPT_RED
#undef USE_OPT_SCAN
#undef USE_CLIP_MASK
#undef USE_HYBRID_REDUCTION
#endif
#undef FAST_COMPACTION
#define FAST_COMP_PROCS (CTuint)64
#define FAST_COMP_LANE_SIZE 256
#include <cutil_math.h>
#include "cuKDTree.h"
static CTuint hybridChangeDepth = 17;
EventLine::EventLine(void) : toggleIndex(0), nodeIndex(NULL)
{
rawEvents = new thrust::device_vector<float>();
eventKeys = new thrust::device_vector<unsigned int>();
}
EventLine::~EventLine(void)
{
delete rawEvents;
delete eventKeys;
}
void cuKDTreeScan::SetHCDepth(uint d)
{
hybridChangeDepth = d;
}
#if 1
#include <cuda_occupancy.h>
#include "kd_scan_kernel.h"
#include "shared_kernel.h"
#include "shared_types.h"
#include <Reduce.h>
#include "nopt_reduce.h"
#include <Sort.h>
#include <Scan.h>
#include <queue>
#include <ForEach.h>
#include <Fill.h>
#include <cuda/Globals.cuh>
#include "buffer_print.h"
#include <chimera/Timer.h>
#include <fstream>
#ifdef FAST_COMPACTION
#define PATH 0
#define FUNCNAME 0
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 1
#define FUNCNAME 1
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 2
#define FUNCNAME 2
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 3
#define FUNCNAME 3
#include "generic_kernel.h"
#undef PATH
#undef FUNCNAME
#define PATH 4
#define FUNCNAME 4
#include "generic_kernel.h"
#endif
#ifdef PROFILE
#define PROFILE_FRAMES 32
#define PROFILE_START chimera::util::HTimer timer; cudaDeviceSynchronize(); timer.Start()
#define PROFILE_END cudaDeviceSynchronize(); timer.Stop(); g_time += timer.GetMillis()
std::map<std::string, double> g_profileTable;
#define KERNEL_PROFILE(_name, _info) \
{ \
auto it = g_profileTable.find(std::string(#_info));\
if(it == g_profileTable.end()) { g_profileTable.insert(std::pair<std::string, double>(std::string(#_info), 0)); }\
chimera::util::HTimer timer;\
cudaDeviceSynchronize();\
timer.Start();\
_name; \
cudaDeviceSynchronize();\
timer.Stop();\
it = g_profileTable.find(std::string(#_info));\
it->second += timer.GetMillis();\
}
#else
#define PROFILE_START
#define PROFILE_END
#define KERNEL_PROFILE(_name, _info) _name
#endif
#undef PRINT_OUT
#ifndef _DEBUG
#undef PRINT_OUT
#endif
#ifndef PRINT_OUT
#undef PRINT_BUFFER
#undef PRINT_BUFFER_N
#undef PRINT_RAW_BUFFER
#undef PRINT_RAW_BUFFER_N
#undef ct_printf
#define PRINT_BUFFER(_name)
#define PRINT_BUFFER_N(_name, _tmp)
#define PRINT_RAW_BUFFER(_name)
#define PRINT_RAW_BUFFER_N(_name, _N)
#define ct_printf(...)
#endif
//#define NODES_GROUP_SIZE 128U
#define EVENT_GROUP_SIZE 256U
void SortEvents(EventLines* eventLine);
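// Small device-side scratch buffer (4 CTuints) that kernels can use to report error codes;
// check() copies it back to the host, prints any non-zero payload and clears the flag.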
struct cudaErrorBuffer
{
CTuint* devMemory;
cudaErrorBuffer(void)
{
cudaMalloc(&devMemory, 4 * sizeof(CTuint));
CTuint null = 0;
cudaMemcpy(devMemory, &null, 4, cudaMemcpyHostToDevice);
}
bool check(void)
{
CTuint hostMemory[4];
cudaMemcpy(&hostMemory, devMemory, 4 * sizeof(CTuint), cudaMemcpyDeviceToHost);
if(hostMemory[0])
{
__ct_printf("GOT ERROR = %d %d %d %d\n", hostMemory[0], hostMemory[1], hostMemory[2], hostMemory[3]);
//__debugbreak();
return true;
}
CTuint null = 0;
cudaMemcpy(devMemory, &null, 4, cudaMemcpyHostToDevice);
return false;
}
~cudaErrorBuffer(void)
{
cudaFree(devMemory);
}
};
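// Grow-only resize of all per-event buffers of one axis; over-allocates by ~20% so that
// subsequent levels rarely trigger another reallocation.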
void EventLine::Resize(CTuint size)
{
if(indexedEvent.Size() >= size)
{
return;
}
size = (CTuint)(1.2 * size);
typeStartScanned.Resize(size);
scannedEventTypeStartMask.Resize(size);
scannedEventTypeStartMaskSums.Resize(size);
eventScanner.Resize(size);
typeStartScanner.Resize(size);
mask.Resize(size);
indexedEvent.Resize(size);
type.Resize(size);
nodeIndex->Resize(size);
primId.Resize(size);
ranges.Resize(size);
}
size_t EventLine::Size(void)
{
return indexedEvent.Size();
}
cuEventLine EventLine::GetPtr(CTbyte index)
{
cuEventLine events;
events.indexedEvent = indexedEvent.Begin(index)();
events.type = type.Begin(index)();
events.nodeIndex = nodeIndex->Begin(index)();
events.primId = primId.Begin(index)();
events.ranges = ranges.Begin(index)();
events.mask = mask.GetPointer();
//events.scannedEventTypeStartMask = typeStartScanner.GetPrefixSum().GetConstPointer();
events.scannedEventTypeStartMask = typeStartScanned.GetConstPointer();
//events.scannedEventTypeEndMask = scannedEventTypeEndMask.Begin()();
return events;
}
cuConstEventLine EventLine::GetConstPtr(CTbyte index)
{
cuConstEventLine events;
events.indexedEvent = indexedEvent.Begin(index)();
events.type = type.Begin(index)();
events.nodeIndex = nodeIndex->Begin(index)();
events.primId = primId.Begin(index)();
events.ranges = ranges.Begin(index)();
events.mask = mask.GetPointer();
//events.scannedEventTypeStartMask = typeStartScanner.GetPrefixSum().GetConstPointer();
events.scannedEventTypeStartMask = typeStartScanned.GetConstPointer();
//events.scannedEventTypeEndMask = scannedEventTypeEndMask.Begin()();
return events;
}
template<>
struct ShrdMemory<CTuint3>
{
__device__ CTuint3* Ptr(void)
{
extern __device__ __shared__ CTuint3 s_b4[];
return s_b4;
}
};
double g_time = 0;
void PrintEventLine(EventLine& line, CTuint l)
{
ct_printf("PrintEventLine\n");
// PRINT_BUFFER_N(line.indexedEvent[line.toggleIndex], l);
///PRINT_BUFFER_N(line.nodeIndex[line.toggleIndex], l);
// PRINT_BUFFER_N(line.prefixSum[line.toggleIndex], l);
// PRINT_BUFFER_N(line.primId[line.toggleIndex], l);
// PRINT_BUFFER_N(line.type[line.toggleIndex], l);
ct_printf("End\n");
}
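// Block-wise scan over three arrays (one per axis) at once: tripleGroupScan writes per-block
// partial results and one sum per block, completeScan2 scans those block sums, and
// spreadScannedSums adds them back to the per-block results.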
template <typename Operator, typename T>
void ScanTriples(ConstTuple<3, T>& src, Tuple<3, CTuint>& scanned, Tuple<3, CTuint>& sums, CTuint N, Operator op, cudaStream_t pStream)
{
static const CTuint block = 256;
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
CTuint grid = nutty::cuda::GetCudaGrid(N, block);
tripleGroupScan<block><<<grid, block, 0, pStream>>>(
src, scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(N, block);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
completeScan2<1024, 3><<<3, 1024, 0, pStream>>>(constSums, sums, _op, sumsCount);
DEVICE_SYNC_CHECK();
spreadScannedSums<<<grid-1, block, 0, pStream>>>(scanned, sums, N);
DEVICE_SYNC_CHECK();
}
}
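// Same three-pass structure as ScanTriples, but specialized for binary (0/1) masks; the
// USE_OPT_SCAN path reads the masks as uchar4/uint4 so each thread scans four elements.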
template <typename Operator, typename T>
void ScanBinaryTriples(ConstTuple<3, T>& src, Tuple<3, CTuint>& scanned, Tuple<3, CTuint>& sums, CTuint N, Operator op, cudaStream_t pStream)
{
#ifdef USE_OPT_SCAN
static const CTuint block = 256;
ConstTuple<3, uchar4> _src;
_src.ts[0] = (uchar4*)src.ts[0];
_src.ts[1] = (uchar4*)src.ts[1];
_src.ts[2] = (uchar4*)src.ts[2];
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
Tuple<3, uint4> _scanned;
_scanned.ts[0] = (uint4*)scanned.ts[0];
_scanned.ts[1] = (uint4*)scanned.ts[1];
_scanned.ts[2] = (uint4*)scanned.ts[2];
const uint BLOCK_SIZE = 256;
const uint elemsPerBlock = 256; //more than 256?
const uint elemsPerThread = 4 * elemsPerBlock / BLOCK_SIZE;
const uint scannedElemsPerBlock = elemsPerThread * BLOCK_SIZE;
uint grid = nutty::cuda::GetCudaGrid(N, scannedElemsPerBlock);
binaryTripleGroupScan<block, elemsPerBlock><<<grid, BLOCK_SIZE, 0, pStream>>>(
_src, _scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
if(grid > 1)
{
#if 1
nutty::PrefixSumOp<CTuint> _op;
completeScan2<1024, 3><<<3, 1024, 0, pStream>>>(constSums, sums, _op, grid);
DEVICE_SYNC_CHECK();
#else
CTuint shrdStepElemperThread = nutty::cuda::GetCudaGrid(sumsCount, 256U);
switch(shrdStepElemperThread)
{
case 1: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 2: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 3: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 4: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 5: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 6: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 7: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 8: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 9: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 10: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 11: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 12: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
default: __ct_printf("error\n"); exit(0); break;
};
#endif
//spreadScannedSums4<<<grid-1, block, 0, pStream>>>(scanned, sums, N, scannedElemsPerBlock);
uint elems = (N - BLOCK_SIZE * elemsPerThread + (N%2));
uint g = nutty::cuda::GetCudaGrid(elems, BLOCK_SIZE);
spreadScannedSums4t<<<g, BLOCK_SIZE>>>(scanned, sums, elems, scannedElemsPerBlock);
DEVICE_SYNC_CHECK();
}
#else
static const CTuint block = 128;
ConstTuple<3, CTuint> constSums;
constSums.ts[0] = sums.ts[0];
constSums.ts[1] = sums.ts[1];
constSums.ts[2] = sums.ts[2];
CTuint grid = nutty::cuda::GetCudaGrid(N, block);
const uint elemsPerThread = 1;
const uint scannedElemsPerBlock = elemsPerThread * block;
grid = nutty::cuda::GetCudaGrid(N, scannedElemsPerBlock);
binaryTripleGroupScanNoOpt<block><<<grid, block, 0, pStream>>>(
src, scanned, sums, op,
N);
DEVICE_SYNC_CHECK();
CTuint sumsCount = grid;
if(sumsCount > 1)
{
#if 1
nutty::PrefixSumOp<CTuint> _op;
completeScan2NoOpt<1024, 3><<<3, 1024, 0, pStream>>>(constSums, sums, _op, sumsCount);
DEVICE_SYNC_CHECK();
#else
CTuint shrdStepElemperThread = nutty::cuda::GetCudaGrid(sumsCount, 256U);
switch(shrdStepElemperThread)
{
case 1: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 2: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 3: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 4: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 5: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 6: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 7: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 8: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 9: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 10: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 11: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
case 12: completeScan<256><<<3, 256, 0, pStream>>>(constSums, sums, op, sumsCount); break;
default: __ct_printf("error\n"); exit(0); break;
};
#endif
spreadScannedSums<<<grid-1, block, 0, pStream>>>(scanned, sums, N);
// const CTuint k = 2;
// CTuint elems = (N - block * elemsPerThread + (N%2)) / k;
// grid = nutty::cuda::GetCudaGrid(elems, block);
// spreadScannedSums2<<<grid, block, 0, pStream>>>(scanned, sums, elems);
DEVICE_SYNC_CHECK();
}
#endif
}
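// One-time allocation of the persistent device buffers; the tree depth falls back to a
// heuristic based on the primitive count if it was not set explicitly (0xFF).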
void cuKDTreeScan::InitBuffer(void)
{
CTuint primitiveCount = (CTuint)(m_orginalVertices.Size() / 3);
m_depth = (byte)min(64, max(1, (m_depth == 0xFF ? GenerateDepth(primitiveCount) : m_depth)));
m_primAABBs.Resize(primitiveCount); nutty::ZeroMem(m_primAABBs);
for(int i = 0; i < 3; ++i)
{
// m_events3[i].SetNodeIndexBuffer(&m_eventNodeIndex);
m_eventLines.eventLines[i].SetNodeIndexBuffer(&m_eventNodeIndex);
}
GrowNodeMemory();
GrowPerLevelNodeMemory(64);
GrowSplitMemory(4 * primitiveCount);
ClearBuffer();
m_dthAsyncIntCopy.Init(2);
m_dthAsyncByteCopy.Init(2);
m_dthAsyncNodesContent.Init(100);
m_gotLeaves.Resize(1);
}
void cuKDTreeScan::ClearBuffer(void)
{
nutty::ZeroMem(m_nodesBBox[0]);
nutty::ZeroMem(m_nodesBBox[1]);
nutty::ZeroMem(m_nodes_ContentCount);
nutty::ZeroMem(m_nodes_IsLeaf);
nutty::ZeroMem(m_nodes_Split);
nutty::ZeroMem(m_nodes_ContentStartAdd);
nutty::ZeroMem(m_nodes_SplitAxis);
nutty::ZeroMem(m_nodes_LeftChild);
nutty::ZeroMem(m_nodes_RightChild);
nutty::ZeroMem(m_splits_Above);
nutty::ZeroMem(m_splits_Below);
nutty::ZeroMem(m_splits_Axis);
nutty::ZeroMem(m_splits_Plane);
nutty::ZeroMem(m_leafNodesContentCount);
nutty::ZeroMem(m_leafNodesContentStart);
}
void cuKDTreeScan::GrowPerLevelNodeMemory(CTuint newSize)
{
m_activeNodesIsLeaf.Resize(newSize);
CTnodeIsLeaf_t* ptr = m_activeNodesIsLeaf.GetPointer();
cudaMemcpyToSymbolAsync(d_activeNodesIsLeaf, &ptr, sizeof(CTnodeIsLeaf_t*), 0, cudaMemcpyHostToDevice, m_pStream);
m_activeNodes.Resize(newSize);
m_activeNodesThisLevel.Resize(newSize);
m_newActiveNodes.Resize(newSize);
m_nodesBBox.Resize(newSize);
m_nodes_ContentStartAdd.Resize(newSize);
m_nodes_ContentCount.Resize(newSize);
m_nodes.isLeaf = m_nodes_IsLeaf.GetDevicePtr()();
m_nodes.splitAxis = m_nodes_SplitAxis.GetDevicePtr()();
m_nodes.split = m_nodes_Split.GetDevicePtr()();
m_nodes.contentStart = m_nodes_ContentStartAdd.GetDevicePtr()();
m_nodes.contentCount = m_nodes_ContentCount.GetDevicePtr()();
m_nodes.leftChild = m_nodes_LeftChild.GetDevicePtr()();
m_nodes.rightChild = m_nodes_RightChild.GetDevicePtr()();
m_nodes.nodeToLeafIndex = m_nodes_NodeIdToLeafIndex.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(cudaMemcpyToSymbolAsync(g_nodes, &m_nodes, sizeof(Node), 0, cudaMemcpyHostToDevice, m_pStream));
}
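// Grows the global per-node arrays (quadrupling the current size) and re-uploads the Node
// pointer struct to constant memory, since the resize may have moved the allocations.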
void cuKDTreeScan::GrowNodeMemory(void)
{
size_t newSize = m_nodes_IsLeaf.Size() ? m_nodes_IsLeaf.Size() * 4 : 32;
m_nodes_IsLeaf.Resize(newSize);
m_nodes_Split.Resize(newSize);
m_nodes_NodeIdToLeafIndex.Resize(newSize);
m_nodes_SplitAxis.Resize(newSize);
m_nodes_LeftChild.Resize(newSize);
m_nodes_RightChild.Resize(newSize);
m_nodes.isLeaf = m_nodes_IsLeaf.GetDevicePtr()();
m_nodes.splitAxis = m_nodes_SplitAxis.GetDevicePtr()();
m_nodes.split = m_nodes_Split.GetDevicePtr()();
m_nodes.contentStart = m_nodes_ContentStartAdd.GetDevicePtr()();
m_nodes.contentCount = m_nodes_ContentCount.GetDevicePtr()();
m_nodes.leftChild = m_nodes_LeftChild.GetDevicePtr()();
m_nodes.rightChild = m_nodes_RightChild.GetDevicePtr()();
m_nodes.nodeToLeafIndex = m_nodes_NodeIdToLeafIndex.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(cudaMemcpyToSymbolAsync(g_nodes, &m_nodes, sizeof(Node), 0, cudaMemcpyHostToDevice, m_pStream));
}
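// Grows the split/event buffers to hold 'eventCount' entries and updates both the
// mutable g_splits and the read-only g_splitsConst constant-memory copies.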
void cuKDTreeScan::GrowSplitMemory(CTuint eventCount)
{
m_splits_Above.Resize(eventCount);
m_splits_Below.Resize(eventCount);
m_splits_Axis.Resize(eventCount);
m_splits_Plane.Resize(eventCount);
m_splits_IndexedSplit.Resize(eventCount);
m_eventIsLeaf.Resize(eventCount);
m_splits.above = m_splits_Above.GetDevicePtr()();
m_splits.below = m_splits_Below.GetDevicePtr()();
m_splits.axis = m_splits_Axis.GetDevicePtr()();
m_splits.indexedSplit = m_splits_IndexedSplit.GetDevicePtr()();
m_splits.v = m_splits_Plane.GetDevicePtr()();
m_eventNodeIndex.Resize(eventCount);
CUDA_RT_SAFE_CALLING_NO_SYNC(cudaMemcpyToSymbolAsync(g_splits, &m_splits, sizeof(Split), 0, cudaMemcpyHostToDevice, m_pStream));
SplitConst splitsConst;
splitsConst.above = m_splits_Above.GetDevicePtr()();
splitsConst.below = m_splits_Below.GetDevicePtr()();
splitsConst.axis = m_splits_Axis.GetDevicePtr()();
splitsConst.indexedSplit = m_splits_IndexedSplit.GetDevicePtr()();
splitsConst.v = m_splits_Plane.GetDevicePtr()();
CUDA_RT_SAFE_CALLING_NO_SYNC(cudaMemcpyToSymbolAsync(g_splitsConst, &splitsConst, sizeof(SplitConst), 0, cudaMemcpyHostToDevice, m_pStream));
}
void cuKDTreeScan::PrintStatus(const char* msg /* = NULL */)
{
ct_printf("PrintStatus: %s\n", msg == NULL ? "" : msg);
PRINT_BUFFER(m_nodes_ContentCount);
PRINT_BUFFER(m_nodes_ContentStartAdd);
}
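// Scans the event start/end type flags of all three axes in a single pass
// (ScanBinaryTriples), writing the scanned values to typeStartScanned and the
// per-block sums to scannedEventTypeStartMaskSums.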
void cuKDTreeScan::ScanEventTypesTriples(CTuint eventCount)
{
CTbyte add = m_eventLines.toggleIndex;
ConstTuple<3, CTeventType_t> ptr;
ptr.ts[0] = m_eventLines.eventLines[0].type[add].GetConstPointer();
ptr.ts[1] = m_eventLines.eventLines[1].type[add].GetConstPointer();
ptr.ts[2] = m_eventLines.eventLines[2].type[add].GetConstPointer();
Tuple<3, CTuint> ptr1;
ptr1.ts[0] = m_eventLines.eventLines[0].typeStartScanned.GetPointer();
ptr1.ts[1] = m_eventLines.eventLines[1].typeStartScanned.GetPointer();
ptr1.ts[2] = m_eventLines.eventLines[2].typeStartScanned.GetPointer();
Tuple<3, CTuint> sums;
sums.ts[0] = m_eventLines.eventLines[0].scannedEventTypeStartMaskSums.GetPointer();
sums.ts[1] = m_eventLines.eventLines[1].scannedEventTypeStartMaskSums.GetPointer();
sums.ts[2] = m_eventLines.eventLines[2].scannedEventTypeStartMaskSums.GetPointer();
nutty::PrefixSumOp<CTeventType_t> op;
ScanBinaryTriples(ptr, ptr1, sums, eventCount, op, m_pStream);
// PrintBuffer(m_eventLines.eventLines[0].scannedEventTypeStartMaskSums, 7);
// PrintBuffer(m_eventLines.eventLines[0].typeStartScanned, eventCount);
// __debugbreak();
}
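// Evaluates the SAH cost for every event plane and reduces it per node to the best
// split. A single node uses a plain reduction; otherwise either a per-node loop or a
// segmented reduction is used, depending on USE_HYBRID_REDUCTION.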
void cuKDTreeScan::ComputeSAH_Splits(
CTuint nodeCount,
CTuint eventCount,
const CTuint* nodesContentCount)
{
CTuint eventBlock = EVENT_GROUP_SIZE;
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
//cuEventLineTriple tripleLine(m_events3, 0);
CTuint start = 0;
//m_pool.Reset();
//m_typeScanner.Resize(eventCount);
//m_types3.Resize(eventCount);
#if 0
for(CTbyte i = 0; i < 3; ++i)
{
// // m_eventLines.eventLines[i].Resize(eventCount);
m_eventLines.eventLines[i].ScanEventTypes(eventCount);
// //PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum(), eventCount);
// // PRINT_RAW_BUFFER(m_events3[i].tmpType);
// // OutputDebugStringA("\n");
// //nutty::ZeroMem(m_eventLines.eventLines[i].typeStartScanned);
//
}
#endif
// static EventStartScanOp<CTbyte> op0;
// for(CTbyte k = 0; k < 3; ++k)
// {
// groupScan<256U, CTbyte, CTuint, EventStartScanOp<CTbyte>> <<<eventGrid, eventBlock>>>(
// m_eventLines.eventLines[k].type[m_eventLines.toggleIndex].GetConstPointer(),
// m_eventLines.eventLines[k].typeStartScanned.GetPointer(),
// sums.GetPointer(),
// op0, eventCount);
// }
//nutty::ZeroMem(m_eventLines.eventLines[0].scannedEventTypeEndMaskSums);
ScanEventTypesTriples(eventCount);
DEVICE_SYNC_CHECK();
// PrintBuffer(m_eventLines.eventLines[0].typeStartScanned);
// PrintBuffer(m_eventLines.eventLines[1].typeStartScanned);
// PrintBuffer(m_eventLines.eventLines[2].typeStartScanned);
#if 0
for(CTbyte i = 0; i < 3; ++i)
{
nutty::HostBuffer<CTuint> tmp0(eventCount);
nutty::HostBuffer<CTuint> tmp1(eventCount);
nutty::Copy(tmp0.Begin(), m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum().Begin(), m_eventLines.eventLines[i].typeStartScanner.GetPrefixSum().Begin() + eventCount);
nutty::Copy(tmp1.Begin(), m_eventLines.eventLines[i].typeStartScanned.Begin(), m_eventLines.eventLines[i].typeStartScanned.Begin() + eventCount);
for(int k = 0; k < eventCount; ++k)
{
if(tmp1[k] != tmp0[k])
{
__ct_printf("error: %d %d %d %d\n", tmp1[k], tmp0[k], k, i);
//exit(0);
const CTuint block = 512; //nutty::cuda::GetCudaBlock(N, 256U);
CTuint grid = nutty::cuda::GetCudaGrid(eventCount, block);
size_t sumSize = (eventCount % nutty::cuda::SCAN_ELEMS_PER_BLOCK) == 0 ? eventCount / nutty::cuda::SCAN_ELEMS_PER_BLOCK : (eventCount / nutty::cuda::SCAN_ELEMS_PER_BLOCK) + 1;
PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].scannedEventTypeEndMaskSums, sumSize);
PRINT_RAW_BUFFER_N(m_eventLines.eventLines[i].typeStartScanner.m_scannedSums, sumSize);
exit(0);
}
}
}
#endif
DEVICE_SYNC_CHECK();
const CTuint elemsPerThread = 1;
CTuint N = eventCount;//nutty::cuda::GetCudaGrid(eventCount, elemsPerThread);
CTuint sahBlock = EVENT_GROUP_SIZE;
CTuint sahGrid = nutty::cuda::GetCudaGrid(N, sahBlock);
// computeSAHSplits3<1, elemsPerThread><<<sahGrid, sahBlock, 0, m_pStream>>>(
// nodesContentCount,
// m_nodes_ContentStartAdd.GetConstPointer(),
// m_nodesBBox[0].GetConstPointer(),
// eventCount,
// m_eventLines.toggleIndex);
cudaComputeSAHSplits3(
nodesContentCount,
m_nodes_ContentStartAdd.GetConstPointer(),
m_nodesBBox[0].GetConstPointer(),
eventCount,
m_eventLines.toggleIndex,
sahGrid, sahBlock, m_pStream);
// computeSAHSplits3Old<<<sahGrid, sahBlock, 0, m_pStream>>>(
// nodesContentCount,
// m_nodes_ContentStartAdd.Begin()(),
// m_nodesBBox[0].Begin()(),
// eventCount,
// m_eventLines.toggleIndex);
DEVICE_SYNC_CHECK();
#if 0
for(int i = 0; i < eventCount; ++i)
{
ct_printf("%d [%d %d] id=%d Axis=%d, Plane=%f SAH=%f :: \n",
i, m_splits_Below[i], m_splits_Above[i],
m_splits_IndexedSplit[i].index,
(CTuint)m_splits_Axis[i],
m_splits_Plane[i],
(m_splits_IndexedSplit[i].sah == INVALID_SAH ? -1 : m_splits_IndexedSplit[i].sah));
//BBox bbox = m_nodesBBox[0][ m_events3[0].nodeIndex[m_events3[0].toggleIndex][i] ];
//ct_printf("%f %f %f | %f %f %f\n", bbox.m_min.x, bbox.m_min.y, bbox.m_min.z, bbox.m_max.x, bbox.m_max.y, bbox.m_max.z);
}
#endif
//start = 0;
//m_pool.Reset();
if(nodeCount == 1)
{
IndexedSAHSplit neutralSplit;
neutralSplit.index = 0;
neutralSplit.sah = FLT_MAX;
#ifdef USE_OPT_RED
ReduceOpt(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin(), eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
//nutty::Reduce(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin() + eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
#else
ReduceNoOpt(m_splits_IndexedSplit.Begin(), m_splits_IndexedSplit.Begin(), eventCount, ReduceIndexedSplit(), neutralSplit, m_pStream);
#endif
DEVICE_SYNC_CHECK();
}
#ifndef USE_HYBRID_REDUCTION
else if(true)
#else
else if(nodeCount < 0)
#endif
{
//m_hNodesContentCount.Resize(nodeCount);
//nutty::Copy(m_hNodesContentCount.Begin(), m_nodes_ContentCount.Begin(), nodeCount);
m_dthAsyncNodesContent.WaitForCopy();
for(CTuint i = 0; i < nodeCount; ++i)
{
CTuint cc = m_dthAsyncNodesContent[i];
CTuint length = 2 * cc;
#ifdef _DEBUG
if(cc <= MAX_ELEMENTS_PER_LEAF)
{
assert(0 && "cc <= MAX_ELEMENTS_PER_LEAF");
//start += length;
continue;
}
#endif
IndexedSAHSplit neutralSplit;
neutralSplit.index = 0;
neutralSplit.sah = FLT_MAX;
#ifdef USE_OPT_RED
//nutty::Reduce(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start + length, ReduceIndexedSplit(), neutralSplit, m_pStream);
ReduceOpt(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start, length, ReduceIndexedSplit(), neutralSplit, m_pStream);
#else
ReduceNoOpt(m_splits_IndexedSplit.Begin() + start, m_splits_IndexedSplit.Begin() + start, length, ReduceIndexedSplit(), neutralSplit, m_pStream);
#endif
DEVICE_SYNC_CHECK();
#ifdef PRINT_OUT
IndexedSAHSplit s = *(m_splits_IndexedSplit.Begin() + start);
std::stringstream ss;
ss << m_nodesBBox[0][i];
ct_printf("%s ", ss.str().c_str());
ct_printf("id=%d, memoryadd=%d ", s.index, start);
CTreal plane = m_splits_Plane[s.index];
CTbyte axis = m_splits_Axis[s.index];
CTuint below = m_splits_Below[s.index];
CTuint above = m_splits_Above[s.index];
ct_printf("axis=%d plane=%f sah=%f below=%d above=%d\n", (CTuint)axis, plane, s.sah, below, above);
if(IS_INVALD_SAH(s.sah))
{
for(int i = start; i < start + length; ++i)
{
ct_printf("%d [%d %d] id=%d Axis=%d, Plane=%f SAH=%f :: ",
i, m_splits_Below[i], m_splits_Above[i],
m_splits_IndexedSplit[i].index,
(CTuint)m_splits_Axis[i],
m_splits_Plane[i],
(m_splits_IndexedSplit[i].sah == INVALID_SAH ? -1 : m_splits_IndexedSplit[i].sah));
BBox bbox;// = m_nodesBBox[0][ m_events3[0].nodeIndex[m_events3[0].toggleIndex][i] ];
ct_printf("%f %f %f | %f %f %f\n", bbox.m_min.x, bbox.m_min.y, bbox.m_min.z, bbox.m_max.x, bbox.m_max.y, bbox.m_max.z);
}
__debugbreak();
}
#endif
start += length;
}
}
else
{
const CTuint blockSize = 512U;
CTuint N = nodeCount * blockSize;
CTuint reduceGrid = nodeCount;//nutty::cuda::GetCudaGrid(N, blockSize);
//cudaErrorBuffer errorBuffer;
cudaSegReduce<blockSize>(m_splits_IndexedSplit.GetPointer(), N, eventCount, reduceGrid, blockSize, m_pStream);
//segReduce<blockSize><<<reduceGrid, blockSize, 0, m_pStream>>>(m_splits_IndexedSplit.GetPointer(), N, eventCount);
DEVICE_SYNC_CHECK();
}
// for(CTuint i = 0; i < min(m_pool.GetStreamCount(), nodeCount); ++i)
// {
// nutty::cuStream& stream = m_pool.GetStream(i);
// nutty::cuEvent e = stream.RecordEvent();
// cudaStreamWaitEvent(0, e.GetPointer(), 0);
// }
//
// nutty::SetDefaultStream();
}
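// Exclusively scans the is-leaf flags of the given node range to count the new
// leaves and prepares the interior-content scan; the final count is assembled from
// the last prefix-sum entry plus the last flag (both copied back asynchronously).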
CTuint cuKDTreeScan::CheckRangeForLeavesAndPrepareBuffer(nutty::DeviceBuffer<CTnodeIsLeaf_t>::iterator& isLeafBegin, CTuint nodeOffset, CTuint nodeRange)
{
m_leafCountScanner.Resize(nodeRange);
m_leafCountScanner.ExcBinaryScan(isLeafBegin + nodeOffset, isLeafBegin + nodeOffset + nodeRange, IsLeafOP<CTnodeIsLeaf_t>(), m_pStream);
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.WaitForStream(m_stream);
#ifndef USE_STREAM
cudaDeviceSynchronize();
#endif
m_dthAsyncIntCopy.StartCopy(m_leafCountScanner.GetPrefixSum().GetConstPointer() + nodeRange - 1, 0);
m_dthAsyncByteCopy.StartCopy(isLeafBegin() + nodeOffset + nodeRange - 1, 0);
CTuint block = nutty::cuda::GetCudaBlock(nodeRange);
CTuint grid = nutty::cuda::GetCudaGrid(nodeRange, block);
DEVICE_SYNC_CHECK();
if(m_interiorCountScanned.Size() <= nodeRange)
{
m_interiorCountScanned.Resize(nodeRange);
m_maskedInteriorContent.Resize(nodeRange);
m_interiorContentScanner.Resize(nodeRange);
m_leafContentScanned.Resize(nodeRange);
}
//
// KERNEL_PROFILE(cudaCreateInteriorContentCountMasks(
// isLeafBegin() + nodeOffset,
// m_nodes_ContentCount.Begin()(),
// m_maskedInteriorContent.Begin()(), nodeRange, grid, block, m_pStream), CreateInteriorContentCountMasks);
DEVICE_SYNC_CHECK();
InteriorMaskOp op;
nutty::PrefixSumOp<CTuint> _op;
m_interiorContentScanner.ExcScanOPI(m_nodes_ContentCount.Begin(), m_nodes_ContentCount.Begin() + nodeRange, op, m_pStream);
// __ct_printf("%d\n", m);
DEVICE_SYNC_CHECK();
//not needed...
// makeOthers<<<grid, block, 0, m_pStream>>>(
//
// m_nodes_ContentStartAdd.Begin()(),
// m_interiorContentScanner.GetPrefixSum().Begin()(),
// m_leafContentScanned.Begin()(),
//
// m_leafCountScanner.GetPrefixSum().Begin()(),
// m_interiorCountScanned.Begin()(),
//
// nodeRange);
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForCopy();
m_dthAsyncByteCopy.WaitForCopy();
CTuint leafCount = m_dthAsyncIntCopy[0] + (m_dthAsyncByteCopy[0] == 1);
DEVICE_SYNC_CHECK();
return leafCount;
}
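// Compacts the current level: nodes flagged as leaves are moved into the leaf
// buffers, the remaining interior nodes and their events are compacted to the
// front. Returns the number of new leaves plus the interior/leaf primitive counts.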
MakeLeavesResult cuKDTreeScan::MakeLeaves(
nutty::DeviceBuffer<CTnodeIsLeaf_t>::iterator& isLeafBegin,
CTuint g_nodeOffset,
CTuint nodeOffset,
CTuint nodeCount,
CTuint eventCount,
CTuint currentLeafCount,
CTuint leafContentOffset,
CTuint initNodeToLeafIndex,
CTbyte gotLeaves)
{
CTuint leafCount = 0;
if(gotLeaves)
{
leafCount = CheckRangeForLeavesAndPrepareBuffer(isLeafBegin, nodeOffset, nodeCount);
DEVICE_SYNC_CHECK();
}
if(!leafCount)
{
MakeLeavesResult result;
result.leafCount = 0;
result.interiorPrimitiveCount = eventCount/2;
result.leafPrimitiveCount = 0;
return result;
}
m_dthAsyncIntCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.WaitForStream(m_stream);
#ifndef USE_STREAM
cudaDeviceSynchronize();
#endif
//m_dthAsyncIntCopy.StartCopy(m_leafContentScanned.GetConstPointer() + nodeCount - 1, 0);
m_dthAsyncIntCopy.StartCopy(m_interiorContentScanner.GetPrefixSum().GetConstPointer() + nodeCount - 1, 0);
m_dthAsyncIntCopy.StartCopy(m_nodes_ContentCount.GetConstPointer() + nodeCount - 1, 1);
m_dthAsyncByteCopy.StartCopy(m_activeNodesIsLeaf.GetConstPointer() + nodeCount + nodeOffset - 1, 0);
m_leafNodesContentStart.Resize(currentLeafCount + leafCount);
m_leafNodesContentCount.Resize(currentLeafCount + leafCount);
const CTuint eventBlock = EVENT_GROUP_SIZE;
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
CTuint nodeBlock = nutty::cuda::GetCudaBlock(nodeCount);
CTuint nodeGrid = nutty::cuda::GetCudaGrid(nodeCount, nodeBlock);
#if 1
// m_eventIsLeafScanner.Resize(eventCount);
// m_eventIsLeafScanner.ExcScan(m_eventIsLeaf.Begin(), m_eventIsLeaf.Begin() + eventCount, TypeOp<CTbyte>());
m_eventIsLeafScanned.Resize(eventCount);
m_eventIsLeafScannedSums.Resize(eventCount/256 + 256);
// binaryGroupScan<256><<<eventGrid, eventBlock, 0, m_pStream>>>(
// m_eventIsLeaf.GetConstPointer(), m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(), TypeOp<CTeventIsLeaf_t>(), eventCount);
#ifndef USE_OPT_SCAN
cudaBinaryGroupScan<256>(m_eventIsLeaf.GetConstPointer(),
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(),
TypeOp<CTeventIsLeaf_t>(), eventCount, eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(eventCount, EVENT_GROUP_SIZE);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
//completeScan<256><<<1, 256, 0, m_pStream>>>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount);
cudaCompleteScan<256>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount, m_pStream);
DEVICE_SYNC_CHECK();
cudaSpreadScannedSumsSingle(
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetConstPointer(), eventCount, eventGrid-1, eventBlock, m_pStream);
}
#else
cudaBinaryGroupScan<256>(m_eventIsLeaf.GetConstPointer(),
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetPointer(),
TypeOp<CTeventIsLeaf_t>(), eventCount, eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
CTuint sumsCount = nutty::cuda::GetCudaGrid(eventCount, EVENT_GROUP_SIZE);
if(sumsCount > 1)
{
nutty::PrefixSumOp<CTuint> _op;
//completeScan<256><<<1, 256, 0, m_pStream>>>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount);
cudaCompleteScan<256>(m_eventIsLeafScannedSums.GetConstPointer(), m_eventIsLeafScannedSums.GetPointer(), _op, sumsCount, m_pStream);
DEVICE_SYNC_CHECK();
cudaSpreadScannedSumsSingle(
m_eventIsLeafScanned.GetPointer(), m_eventIsLeafScannedSums.GetConstPointer(), eventCount, eventGrid-1, eventBlock, m_pStream);
}
#endif
#endif
DEVICE_SYNC_CHECK();
if(m_leafNodesContent.Size() < leafContentOffset + eventCount/2)
{
m_leafNodesContent.Resize(leafContentOffset + eventCount/2);
}
DEVICE_SYNC_CHECK();
cudaCompactMakeLeavesData(
isLeafBegin() + nodeOffset,
m_nodes_ContentStartAdd.GetPointer(),
m_eventIsLeafScanned.GetConstPointer(),
m_nodes_ContentCount.GetPointer(),
m_eventIsLeaf.GetPointer(),
m_leafCountScanner.GetPrefixSum().GetConstPointer(),
m_activeNodes.GetPointer(),
m_leafCountScanner.GetPrefixSum().GetConstPointer(),
m_interiorContentScanner.GetPrefixSum().GetConstPointer(),
m_nodesBBox[1].GetPointer(),
m_leafNodesContent.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer(),
m_newNodesContentCount.GetPointer(),
m_newNodesContentStartAdd.GetPointer(),
m_leafNodesContentStart.GetPointer(),
m_leafNodesContentCount.GetPointer(),
m_newActiveNodes.GetPointer(),
m_nodesBBox[0].GetPointer(),
g_nodeOffset,
leafContentOffset,
currentLeafCount,
nodeCount,
m_eventLines.toggleIndex,
eventCount,
eventGrid, eventBlock, m_pStream);
DEVICE_SYNC_CHECK();
m_eventLines.Toggle();
m_dthAsyncIntCopy.WaitForCopy();
m_dthAsyncByteCopy.WaitForCopy();
CTuint copyDistance = nodeCount - leafCount;
if(copyDistance)
{
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_nodes_ContentCount.GetPointer(), m_newNodesContentCount.GetPointer(), copyDistance * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_nodes_ContentStartAdd.GetPointer(), m_newNodesContentStartAdd.GetPointer(), copyDistance * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_activeNodes.GetPointer(), m_newActiveNodes.GetPointer(), copyDistance * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
}
CTuint interiorPrimCount = m_dthAsyncIntCopy[0] + (m_dthAsyncByteCopy[0] == 0) * m_dthAsyncIntCopy[1];
//if((int)interiorPrimCount < 0)
{
ct_printf("interiorPrimCount = %d %d %d %d\n", interiorPrimCount, m_dthAsyncIntCopy[0], (m_dthAsyncByteCopy[0] == 0), m_dthAsyncIntCopy[1]);
// __debugbreak();
}
CTuint leafPrimCount = eventCount/2 - interiorPrimCount;
leafPrimCount = leafPrimCount > eventCount/2 ? 0 : leafPrimCount;
MakeLeavesResult result;
result.leafCount = leafCount;
result.interiorPrimitiveCount = interiorPrimCount;
result.leafPrimitiveCount = leafPrimCount;
DEVICE_SYNC_CHECK();
return result;
}
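// Grows the clip-mask buffers (with roughly 20% head room) and re-binds their device
// pointers to the g_clipArray and cms constant-memory symbols.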
void ClipMask::Resize(size_t size, cudaStream_t pStream)
{
if(mask[0].Size() >= size) return;
size = (CTuint)(1.2 * size);
//mask3.Resize(size);
mask3Scanner.Resize(size);
for(int i = 0; i < 3; ++i)
{
scannedMask[i].Resize(size);
scannedSums[i].Resize(size);
mask[i].Resize(size);
newSplits[i].Resize(size);
index[i].Resize(size);
elemsPerTile[i].Resize(size);
scannedOverlappingMasks[i].Resize(size);
scannedOverlappingSums[i].Resize(size);
// maskScanner[i].Resize(size);
}
cuClipMaskArray mm;
GetPtr(mm);
cudaMemcpyToSymbolAsync(g_clipArray, &mm, sizeof(cuClipMaskArray), 0, cudaMemcpyHostToDevice, pStream);
cuConstClipMask cmss[3];
GetConstPtr(cmss[0], 0);
GetConstPtr(cmss[1], 1);
GetConstPtr(cmss[2], 2);
cudaMemcpyToSymbolAsync(cms, &cmss, 3 * sizeof(cuConstClipMask), 0, cudaMemcpyHostToDevice, pStream);
}
void EventLine::ScanEvents(CTuint length)
{
__ct_printf("fatal error: ScanEvents not working\n");
exit(-1);
//eventScanner.ExcScan(mask.Begin(), mask.Begin() + length, nutty::PrefixSumOp<CTbyte>());
}
struct ClipMaskPrefixSum3OP
{
__device__ CTuint3 operator()(CTbyte3 elem)
{
CTuint3 v;
v.x = isSet(elem.x) ? 1 : 0;
v.y = isSet(elem.y) ? 1 : 0;
v.z = isSet(elem.z) ? 1 : 0;
return v;
}
__device__ __host__ CTbyte3 GetNeutral(void)
{
CTbyte3 v = {0};
return v;
}
};
void ClipMask::ScanMasks(CTuint length)
{
// for(CTbyte i = 0; i < 3; ++i)
// {
// maskScanner[i].ExcScan(mask[i].Begin(), mask[i].Begin() + length, ClipMaskPrefixSumOP());
// }
//mask3Scanner.ExcScan(mask3.Begin(), mask3.End(), ClipMaskPrefixSum3OP());
}
void EventLine::CompactClippedEvents(CTuint length)
{
// PREPARE_KERNEL(length)
// compactEventLine<<<grid, block>>>(GetDst(), GetSrc(), mask.Begin()(), eventScanner.GetPrefixSum().Begin()(), length);
// }
}
void EventLine::ScanEventTypes(CTuint eventCount)
{
EventStartScanOp<CTbyte> op0;
CTbyte add = toggleIndex;
typeStartScanner.ExcScan(type.Begin(add), type.Begin(add) + eventCount, op0);
}
void EventLines::BindToConstantMemory(cudaStream_t pStream)
{
cuEventLineTriple src;//(eventLines, 0);
src.lines[0] = eventLines[0].GetPtr(0);
src.lines[1] = eventLines[1].GetPtr(0);
src.lines[2] = eventLines[2].GetPtr(0);
cuEventLineTriple dst;//(eventLines, 1);
dst.lines[0] = eventLines[0].GetPtr(1);
dst.lines[1] = eventLines[1].GetPtr(1);
dst.lines[2] = eventLines[2].GetPtr(1);
// cudaMemcpyToSymbol(g_eventTriples, &src, sizeof(cuEventLineTriple));
// cudaMemcpyToSymbol(g_eventTriples, &dst, sizeof(cuEventLineTriple), sizeof(cuEventLineTriple));
cudaMemcpyToSymbolAsync(g_eventTriples, &src, sizeof(cuEventLineTriple), 0, cudaMemcpyHostToDevice, pStream);
cudaMemcpyToSymbolAsync(g_eventTriples, &dst, sizeof(cuEventLineTriple), sizeof(cuEventLineTriple), cudaMemcpyHostToDevice, pStream);
// cuConstEventLineTriple constSrc;//(eventLines, 0);
// src.lines[0] = eventLines[0].GetPtr(0);
// src.lines[1] = eventLines[1].GetPtr(0);
// src.lines[2] = eventLines[2].GetPtr(0);
//
// cudaMemcpyToSymbolAsync(g_eventSrcTriples, &constSrc, sizeof(cuConstEventLineTriple), 0, cudaMemcpyHostToDevice);
// cudaMemcpyToSymbolAsync(g_eventDstTriples, &dst, sizeof(cuEventLineTriple), 0, cudaMemcpyHostToDevice);
}
// void EventLines::BindToggleIndexToConstantMemory(void)
// {
// CTbyte dst = ((toggleIndex+1)%2);
// cudaMemcpyToSymbol(g_eventSrcIndex, &toggleIndex, sizeof(CTbyte));
// cudaMemcpyToSymbol(g_eventDstIndex, &dst, sizeof(CTbyte));
// }
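// Scans the clip masks of all three axes; with FAST_COMPACTION enabled the valid
// elements are first counted per tile and the scan runs over the tile counts instead.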
void cuKDTreeScan::ScanClipMaskTriples(CTuint eventCount)
{
#ifdef FAST_COMPACTION
ConstTuple<3, CTuint> ptr;
const CTuint blockSize = FAST_COMP_LANE_SIZE;
CTuint tilesPerProc = nutty::cuda::GetCudaGrid(eventCount, FAST_COMP_PROCS * FAST_COMP_LANE_SIZE);
CTuint threads = FAST_COMP_PROCS * FAST_COMP_LANE_SIZE;
CTuint grid = nutty::cuda::GetCudaGrid(threads, blockSize);
countValidElementsPerTile2<blockSize><<<grid, blockSize, 0, m_pStream>>>(tilesPerProc, eventCount);
// PrintBuffer(m_clipsMask.mask[0], 2 * blockSize);
// PrintBuffer(m_clipsMask.elemsPerTile[0], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.elemsPerTile[1], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.elemsPerTile[2], FAST_COMP_PROCS);
ptr.ts[0] = (const CTuint*)m_clipsMask.elemsPerTile[0].GetConstPointer();
ptr.ts[1] = (const CTuint*)m_clipsMask.elemsPerTile[1].GetConstPointer();
ptr.ts[2] = (const CTuint*)m_clipsMask.elemsPerTile[2].GetConstPointer();
#else
ConstTuple<3, CTclipMask_t> ptr;
ptr.ts[0] = m_clipsMask.mask[0].GetConstPointer();
ptr.ts[1] = m_clipsMask.mask[1].GetConstPointer();
ptr.ts[2] = m_clipsMask.mask[2].GetConstPointer();
#endif
Tuple<3, CTuint> ptr1;
ptr1.ts[0] = m_clipsMask.scannedMask[0].GetPointer();
ptr1.ts[1] = m_clipsMask.scannedMask[1].GetPointer();
ptr1.ts[2] = m_clipsMask.scannedMask[2].GetPointer();
Tuple<3, CTuint> sums;
sums.ts[0] = m_clipsMask.scannedSums[0].GetPointer();
sums.ts[1] = m_clipsMask.scannedSums[1].GetPointer();
sums.ts[2] = m_clipsMask.scannedSums[2].GetPointer();
#ifndef FAST_COMPACTION
ClipMaskPrefixSumOP op;
ScanBinaryTriples(ptr, ptr1, sums, eventCount, op, m_pStream);
// ClipMaskIsOverlappingOP __op;
// ptr1.ts[0] = m_clipsMask.scannedOverlappingMasks[0].GetPointer();
// ptr1.ts[1] = m_clipsMask.scannedOverlappingMasks[1].GetPointer();
// ptr1.ts[2] = m_clipsMask.scannedOverlappingMasks[2].GetPointer();
// sums.ts[0] = m_clipsMask.scannedOverlappingSums[0].GetPointer();
// sums.ts[1] = m_clipsMask.scannedOverlappingSums[1].GetPointer();
// sums.ts[2] = m_clipsMask.scannedOverlappingSums[2].GetPointer();
// ScanTriples(ptr, ptr1, sums, eventCount, __op, m_pStream);
// //PrintBuffer(m_clipsMask.scannedOverlappingMasks[0], eventCount);
// __ct_printf("scannedOverlappingMasks x: %d \n", m_clipsMask.scannedOverlappingMasks[0][eventCount-1]);
// __ct_printf("scannedOverlappingMasks y: %d \n", m_clipsMask.scannedOverlappingMasks[1][eventCount-1]);
// __ct_printf("scannedOverlappingMasks z: %d \n\n", m_clipsMask.scannedOverlappingMasks[2][eventCount-1]);
#else
nutty::PrefixSumOp<CTuint> op;
ScanTriples(ptr, ptr1, sums, FAST_COMP_PROCS, op, m_pStream);
// cudaDeviceSynchronize();
/*
for(int i = 0; i < 3; ++i)
{
int cpuSum = 0;
for(int p = 0; p < eventCount; ++p)
{
cpuSum += m_clipsMask.mask[i][p] > 0;
}
int sum = 0;
for(int g = 0; g < warps; ++g)
{
sum += m_clipsMask.elemsPerTile[i][g];
}
ct_printf("cpuSum=%d sum=%d, %d + %d = %d\n",
cpuSum, sum, m_clipsMask.scannedMask[i][warps-1], m_clipsMask.elemsPerTile[i][warps-1], m_clipsMask.scannedMask[i][warps-1] + m_clipsMask.elemsPerTile[i][warps-1]);
}
PrintBuffer(m_clipsMask.elemsPerTile[0], warps);
PrintBuffer(m_clipsMask.elemsPerTile[1], warps);
PrintBuffer(m_clipsMask.elemsPerTile[2], warps);
*/
// PrintBuffer(m_clipsMask.scannedMask[0], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.scannedMask[1], FAST_COMP_PROCS);
// PrintBuffer(m_clipsMask.scannedMask[2], FAST_COMP_PROCS);
#endif
//m_clipsMask.maskScanner[0].ExcScan(m_clipsMask.mask[0].Begin(), m_clipsMask.mask[0].Begin() + eventCount, op, m_pStream);
// m_clipsMask.maskScanner[1].ExcScan(m_clipsMask.mask[1].Begin(), m_clipsMask.mask[1].Begin() + eventCount, op, m_pStream);
// m_clipsMask.maskScanner[2].ExcScan(m_clipsMask.mask[2].Begin(), m_clipsMask.mask[2].Begin() + eventCount, op, m_pStream);
}
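// Builds the whole tree: creates triangle AABBs, reduces them to the scene bounding
// box, generates and sorts the initial events, then iterates over the levels. Each
// level finds the best SAH split per node, clips and compacts the events, creates
// the child nodes and extracts finished leaves, growing node/split memory on demand.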
CT_RESULT cuKDTreeScan::Update(void)
{
float* rawEvents[3];
unsigned int* rawEventkeys[3];
CTuint primitiveCount = (CTuint)(m_currentTransformedVertices.Size() / 3);
if(!m_initialized)
{
InitBuffer();
m_initialized = true;
//CUDA_RT_SAFE_CALLING_NO_SYNC(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1024 * 1024 * 1024));
}
for(CTbyte i = 0; i < 3; ++i)
{
m_eventLines.eventLines[i].rawEvents->resize(2 * primitiveCount);
m_eventLines.eventLines[i].eventKeys->resize(2 * primitiveCount);
rawEventkeys[i] = m_eventLines.eventLines[i].eventKeys->data().get();
rawEvents[i] = m_eventLines.eventLines[i].rawEvents->data().get();
}
//ClearBuffer();
// static bool staticc = true;
KERNEL_PROFILE(cudaCreateTriangleAABBs(m_currentTransformedVertices.GetPointer(), m_primAABBs.GetPointer(), primitiveCount, m_pStream), init);
//PrintBuffer(m_primAABBs);
// if(staticc)
{
DEVICE_SYNC_CHECK();
static float3 max3f = {FLT_MAX, FLT_MAX, FLT_MAX};
static float3 min3f = -max3f;
BBox bboxN;
bboxN.m_min = max3f;
bboxN.m_max = min3f;
m_sceneBBox.Resize(m_primAABBs.Size()/2);
nutty::Reduce(m_sceneBBox.Begin(), m_primAABBs.Begin(), m_primAABBs.End(), ReduceBBox(), bboxN, m_pStream);
//staticc = false;
}
DEVICE_SYNC_CHECK();
CTuint elementBlock = nutty::cuda::GetCudaBlock(primitiveCount);
CTuint elementGrid = nutty::cuda::GetCudaGrid(primitiveCount, elementBlock);
m_eventLines.Resize(2 * primitiveCount, m_pStream);
#ifdef PROFILE
chimera::util::HTimer g_timer;
cudaDeviceSynchronize();
g_timer.Start();
g_time = 0;
#endif
m_eventLines.toggleIndex = 0;
KERNEL_PROFILE(
cudaCreateEventsAndInit3(
m_primAABBs.GetConstPointer(),
m_sceneBBox.GetConstPointer(),
m_activeNodes.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer(),
m_nodes_IsLeaf.GetPointer(),
m_nodes_ContentCount.GetPointer(),
m_nodesBBox[0].GetPointer(),
rawEvents,
rawEventkeys,
primitiveCount, elementGrid, elementBlock, m_pStream)
,init
);
DEVICE_SYNC_CHECK();
KERNEL_PROFILE(SortEvents(&m_eventLines), init);
DEVICE_SYNC_CHECK();
// for(CTbyte i = 0; i < 3; ++i)
// {
// nutty::Sort(
// nutty::DevicePtr_Cast<IndexedEvent>(m_eventLines.eventLines[i].GetPtr(0).indexedEvent),
// nutty::DevicePtr_Cast<IndexedEvent>(m_eventLines.eventLines[i].GetPtr(0).indexedEvent + 2 * primitiveCount),
// EventSort(),
// m_pStream);
// }
// thrust::host_vector<float> h_vec(primitiveCount *2);
// thrust::host_vector<unsigned int> h_vecK(primitiveCount *2);
// h_vec = *m_eventLines.eventLines[2].rawEvents;
// h_vecK = *m_eventLines.eventLines[2].eventKeys;
// for(int i = 0; i < primitiveCount *2; ++i)
// {
// __ct_printf("%f %d -- ", h_vec[i], h_vecK[i]);
// }
// __ct_printf("\n\n");
// PrintBuffer(m_eventLines.eventLines[2].indexedEvent[0]);
//reorderEvent3<<<2 * elementGrid, elementBlock, 0, m_pStream>>>(2 * primitiveCount);
KERNEL_PROFILE(cudaReorderEvent3(2 * primitiveCount, 2 * elementGrid, elementBlock, rawEvents, rawEventkeys, m_pStream), init);
DEVICE_SYNC_CHECK();
CTuint g_interiorNodesCountOnThisLevel = 1;
CTuint g_currentInteriorNodesCount = 1;
CTuint g_currentLeafCount = 0;
CTuint g_leafContentOffset = 0;
CTuint g_childNodeOffset = 1;
CTuint g_nodeOffset = 0;
CTuint g_entries = 1;
CTuint eventCount = 2 * primitiveCount;
#ifdef USE_HYBRID_COMPACTION
CTuint hybridChangeDepth = (3 * m_depth) / 4; //log((2*eventCount)/3);
#endif
m_eventLines.Toggle();
CTuint eventSum = 0;
for(CTbyte d = 0; d < m_depth; ++d)
{
// static int i = 0;
//__ct_printf("New Level=%d Events=%d (Frame=%d)\n", d, eventCount, ++i);
const static CTbyte null = 0;
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_gotLeaves.GetPointer(), &null, sizeof(CTbyte), cudaMemcpyHostToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyToSymbolAsync(d_nodeOffset, &g_interiorNodesCountOnThisLevel, sizeof(CTuint), 0, cudaMemcpyHostToDevice, m_pStream));
CTuint nodeCount = g_interiorNodesCountOnThisLevel;
CTuint nodeBlock = nutty::cuda::GetCudaBlock(nodeCount);
CTuint nodeGrid = nutty::cuda::GetCudaGrid(nodeCount, nodeBlock);
CTuint eventBlock = EVENT_GROUP_SIZE;//nutty::cuda::GetCudaBlock(eventCount, 256U);
CTuint eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
DEVICE_SYNC_CHECK();
#ifndef USE_HYBRID_REDUCTION
m_dthAsyncNodesContent.WaitForStream(m_stream);
m_dthAsyncNodesContent.Resize(nodeCount);
m_dthAsyncNodesContent.StartCopy(m_nodes_ContentCount.GetConstPointer(), 0, nodeCount);
#endif
#if 0
m_hNodesContentCount.Resize(nodeCount);
nutty::Copy(m_hNodesContentCount.Begin(), m_nodes_ContentCount.Begin(), nodeCount);
// PrintBuffer(m_hNodesContentCount, nodeCount);
// for(int i = 0; i < nodeCount; ++i)
// {
// if(m_hNodesContentCount[i] > 500000 || m_hNodesContentCount[i] <= MAX_ELEMENTS_PER_LEAF)
// {
// exit(0);
// }
// }
//PrintBuffer(m_nodes_ContentCount, nodeCount);
PRINT_BUFFER_N(m_nodes_ContentCount, nodeCount);
#endif
//m_pool.ClearEvents();
KERNEL_PROFILE(
ComputeSAH_Splits(
nodeCount,
eventCount,
m_nodes_ContentCount.Begin()()), ComputeSAH_Splits);
DEVICE_SYNC_CHECK();
#if 0
//not strictly necessary right now...
makeLeafIfBadSplitOrLessThanMaxElements<<<nodeGrid, nodeBlock, 0, m_pStream>>>(
m_nodes,
m_nodes_IsLeaf.GetPointer() + g_nodeOffset,
m_activeNodes.GetPointer(),
m_activeNodesIsLeaf.GetPointer(),
m_splits,
d == m_depth-1,
nodeCount);
DEVICE_SYNC_CHECK();
#endif
m_newNodesContentCount.Resize(m_nodes_ContentCount.Size());
m_newNodesContentStartAdd.Resize(m_nodes_ContentCount.Size());
m_lastNodeContentStartAdd.Resize(m_newNodesContentStartAdd.Size());
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_lastNodeContentStartAdd.GetPointer(), m_nodes_ContentStartAdd.GetPointer(), nodeCount * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
MakeLeavesResult leavesRes; // = MakeLeaves(m_activeNodesIsLeaf.Begin(), g_nodeOffset, 0, nodeCount, eventCount, g_currentLeafCount, g_leafContentOffset, 0);
leavesRes.leafCount = 0;
leavesRes.interiorPrimitiveCount = eventCount/2;
leavesRes.leafPrimitiveCount = 0;
CTuint lastLeaves = leavesRes.leafCount;
primitiveCount = leavesRes.interiorPrimitiveCount;
if(leavesRes.leafCount) //assert(!leavesRes.leafCount && "currently not working");
{
OutputDebugStringA("leavesRes.leafCount currently not working\n");
exit(0);
}
DEVICE_SYNC_CHECK();
CTuint count = eventCount;
if(leavesRes.interiorPrimitiveCount)
{
CTuint block = EVENT_GROUP_SIZE;
CTuint grid = nutty::cuda::GetCudaGrid(count, block);
m_eventLines.Resize(2 * count, m_pStream);
m_clipsMask.Resize(2 * count, m_pStream);
// nutty::ZeroMem(m_clipsMask.mask[0]);
// nutty::ZeroMem(m_clipsMask.mask[1]);
// nutty::ZeroMem(m_clipsMask.mask[2]);
// nutty::ZeroMem(m_clipsMask.maskScanner[0].GetPrefixSum());
// nutty::ZeroMem(m_clipsMask.maskScanner[1].GetPrefixSum());
// nutty::ZeroMem(m_clipsMask.maskScanner[2].GetPrefixSum());
// CTuint tb = 32;
// CTuint tg = nutty::cuda::GetCudaGrid(count, tb);
KERNEL_PROFILE(cudaCreateClipMask(
m_nodes_ContentStartAdd.GetPointer(),
m_nodes_ContentCount.GetPointer(),
count,
m_eventLines.toggleIndex, grid, block, m_pStream), ClippingAndPartitioning);
#ifndef USE_CLIP_MASK
KERNEL_PROFILE(cudaClipEventsMask(m_nodes_ContentStartAdd.GetPointer(),
m_nodes_ContentCount.GetPointer(),
count,
m_eventLines.toggleIndex, grid, block, m_pStream), ClippingAndPartitioning);
#endif
// std::ofstream file("mask.txt", std::ios::app);
//
// for(int axis = 0; axis < 3; ++axis)
// {
// nutty::HostBuffer<CTuint> tmp(2 * count);
// nutty::Copy(tmp.Begin(), m_clipsMask.mask[axis].Begin(), m_clipsMask.mask[axis].Begin() + 2 * count);
// for(int i = 0; i < 2 * count; ++i)
// {
// file << (int)tmp[i] << " ";
// }
// file << "NA ";
// }
//file << "NL\n";
//
// createClipMask<<<grid, block, 0, m_pStream>>>(
// m_nodes_ContentStartAdd.GetPointer(),
// m_nodes_ContentCount.GetPointer(),
// count,
// m_eventLines.toggleIndex);
// clipEvents3<<<grid, block, 0, m_pStream>>>(
// m_nodes_ContentStartAdd.GetPointer(),
// m_nodes_ContentCount.GetPointer(),
// count,
// m_eventLines.toggleIndex);
// CTuint toggleSave = m_eventLines.toggleIndex;
// CTuint prefixSums[3];
// for(int k = 0; k < 3; ++k)
// {
// nutty::HostBuffer<CTuint> srcEventScan(2 * count);
// nutty::Copy(srcEventScan.Begin(), m_clipsMask.mask[k].Begin(), m_clipsMask.mask[k].Begin() + 2 * count);
// prefixSums[k] = 0;
// for(int i = 0; i < srcEventScan.Size(); ++i)
// {
// prefixSums[k] += srcEventScan[i] > 0;
// }
// }
DEVICE_SYNC_CHECK();
//m_clipsMask.ScanMasks(2 * count);
KERNEL_PROFILE(ScanClipMaskTriples(2 * count), ClippingAndPartitioning);
//m_clipsMask.mask3Scanner.ExcScan(m_clipsMask.mask3.Begin(), m_clipsMask.mask3.Begin() + 2 * count, ClipMaskPrefixSum3OP());
DEVICE_SYNC_CHECK();
#ifndef USE_STREAM
cudaDeviceSynchronize();
#endif
m_dthAsyncIntCopy.WaitForStream(m_stream);
#ifndef FAST_COMPACTION
//m_dthAsyncByteCopy.WaitForStream(m_stream);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.scannedMask[0].GetConstPointer() + 2 * count - 1), 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.mask[0].GetPointer() + 2 * count - 1), 0);
#else
const CTuint blockSize = FAST_COMP_LANE_SIZE;
CTuint threads = FAST_COMP_PROCS * FAST_COMP_LANE_SIZE;
CTuint tilesPerProc = nutty::cuda::GetCudaGrid(2 * count, threads);
CTuint activeWarps = nutty::cuda::GetCudaGrid(2 * count, tilesPerProc * FAST_COMP_LANE_SIZE);
CTuint _grid = nutty::cuda::GetCudaGrid(threads, blockSize);
//int tiles = 2*eventCount / TILE_SIZE + (2*eventCount % TILE_SIZE == 0 ? 0 : 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.scannedMask[0].GetConstPointer() + min(activeWarps, FAST_COMP_PROCS) - 1), 1);
m_dthAsyncIntCopy.StartCopy((CTuint*)(m_clipsMask.elemsPerTile[0].GetConstPointer() + min(activeWarps, FAST_COMP_PROCS) - 1), 0);
#endif
CTuint childCount = (nodeCount - leavesRes.leafCount) * 2;
CTuint thisLevelNodesLeft = nodeCount - leavesRes.leafCount;
#ifndef FAST_COMPACTION
CTuint _block = EVENT_GROUP_SIZE;
CTuint _grid = nutty::cuda::GetCudaGrid(3 * 2 * count, block);
// compactEventLineV2<<<_grid, _block, 0, m_pStream>>>(
// 2 * count,
// m_eventLines.toggleIndex);
// chimera::util::HTimer tt;
//
// cudaDeviceSynchronize();
// tt.Start();
nodeBlock = nutty::cuda::GetCudaBlock(thisLevelNodesLeft);
nodeGrid = nutty::cuda::GetCudaGrid(thisLevelNodesLeft, nodeBlock);
// if(nodeCount > 1024)
// {
// KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, m_pStream), tb);
//
// }
// else
// {
// _grid = nutty::cuda::GetCudaGrid(3 * 2 * count, block);
// KERNEL_PROFILE(cudaCompactEventLineV2(3 * 2 * count, m_eventLines.toggleIndex, _grid, _block, m_pStream), ta);
// }
//if(nodeCount > 15)
{
#ifndef USE_HYBRID_COMPACTION
CTuint N = 3 * 2 * count;
_grid = nutty::cuda::GetCudaGrid(N, block);
KERNEL_PROFILE(cudaCompactEventLineV2(N, m_eventLines.toggleIndex, _grid, _block, m_pStream), ClippingAndPartitioning);
#else
if(d >= hybridChangeDepth)
{
__ct_printf("%d %d\n", d, hybridChangeDepth);
KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, m_pStream), ClippingAndPartitioning);
}
else
{
CTuint N = 3 * 2 * count;
_grid = nutty::cuda::GetCudaGrid(N, block);
KERNEL_PROFILE(cudaCompactEventLineV2(N, m_eventLines.toggleIndex, _grid, _block, m_pStream), ClippingAndPartitioning);
}
#endif
}
//else
{
// _grid = nutty::cuda::GetCudaGrid(2 * count, block);
// KERNEL_PROFILE(cudaCompactEventLineV2Buffered(2 * count, m_eventLines.toggleIndex, _grid, _block, m_pStream), cudaCompactEventLineV21);
}
/* PrintBuffer(m_clipsMask.mask[0], 2 * count);*/
// Tuple<3, CTuint> ptr1;
// ptr1.ts[0] = m_clipsMask.scannedOverlappingMasks[0].GetPointer();
// ptr1.ts[1] = m_clipsMask.scannedOverlappingMasks[1].GetPointer();
// ptr1.ts[2] = m_clipsMask.scannedOverlappingMasks[2].GetPointer();
// CTuint b = EVENT_GROUP_SIZE;
//
// CTuint g = nutty::cuda::GetCudaGrid(count, block);
// insertClippedEventsSplitAxis<EVENT_GROUP_SIZE><<<g, b, 0, m_pStream>>>(ptr1, m_nodes_ContentStartAdd.GetConstPointer(), m_eventLines.toggleIndex, count);
DEVICE_SYNC_CHECK();
// cudaDeviceSynchronize();
// tt.Stop();
/* __ct_printf("%f (%d)\n", tt.GetMillis(), d);*/
// nutty::cuEvent e = m_stream.RecordEvent();
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[0].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[1].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[2].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[3].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[4].GetPointer(), e.GetPointer(), 0));
// cuEventDestroy(e.Free());
//
// optimizedcompactEventLineV3Type0<<<_grid, _block, 0, m_compactStreams[0].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type1<<<_grid, _block, 0, m_compactStreams[1].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type2<<<_grid, _block, 0, m_compactStreams[2].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type3<<<_grid, _block, 0, m_compactStreams[3].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// optimizedcompactEventLineV3Type4<<<_grid, _block, 0, m_compactStreams[4].GetPointer()>>>(m_eventLines.toggleIndex, 2 * count);
// DEVICE_SYNC_CHECK();
//
// m_stream.WaitEvent(m_compactStreams[0].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[1].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[2].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[3].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[4].RecordEvent());
#else
//optimizedcompactEventLineV2<blockSize/32><<<_grid, blockSize>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
//optimizedcompactEventLineV3<blockSize><<<_grid, blockSize>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// nutty::cuEvent e = m_stream.RecordEvent();
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[0].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[1].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[2].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[3].GetPointer(), e.GetPointer(), 0));
// CUDA_DRIVER_SAFE_CALLING_SYNC(cuStreamWaitEvent(m_compactStreams[4].GetPointer(), e.GetPointer(), 0));
// cuEventDestroy(e.Free());
//
// optimizedcompactEventLineV3Type0<blockSize><<<_grid, blockSize, 0, m_compactStreams[0].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type1<blockSize><<<_grid, blockSize, 0, m_compactStreams[1].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type2<blockSize><<<_grid, blockSize, 0, m_compactStreams[2].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type3<blockSize><<<_grid, blockSize, 0, m_compactStreams[3].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// optimizedcompactEventLineV3Type4<blockSize><<<_grid, blockSize, 0, m_compactStreams[4].GetPointer()>>>(m_eventLines.toggleIndex, tilesPerProc, activeWarps, 2 * count);
// DEVICE_SYNC_CHECK();
//
// m_stream.WaitEvent(m_compactStreams[0].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[1].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[2].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[3].RecordEvent());
// m_stream.WaitEvent(m_compactStreams[4].RecordEvent());
#endif
m_eventLines.Toggle();
g_leafContentOffset += leavesRes.leafPrimitiveCount;
// if(lastLeaves)
// {
// // setActiveNodesMask<1><<<nodeGrid, nodeBlock, 0, m_pStream>>>(
// // m_activeNodesThisLevel.Begin()(),
// // m_activeNodesIsLeaf.Begin()(),
// // m_interiorCountScanned.Begin()(),
// // 0,
// // nodeCount);
// KERNEL_PROFILE(cudaSetActiveNodesMask(
// m_activeNodesThisLevel.Begin()(),
// m_activeNodesIsLeaf.Begin()(),
// m_interiorCountScanned.Begin()(),
// 0,
// nodeCount, nodeGrid, nodeBlock, m_pStream), cudaSetActiveNodesMask);
// }
//merge into compact events?
KERNEL_PROFILE(cudaInitInteriorNodes(
m_activeNodes.GetConstPointer(),
m_activeNodesThisLevel.GetConstPointer(),
m_nodesBBox[0].GetConstPointer(),
m_nodesBBox[1].GetPointer(),
m_nodes_ContentCount.GetPointer(),
m_newNodesContentCount.GetPointer(),
m_newActiveNodes.GetPointer(),
m_activeNodesIsLeaf.GetPointer() + nodeCount,
g_childNodeOffset,
g_nodeOffset,
thisLevelNodesLeft,
m_lastNodeContentStartAdd.GetPointer(),
m_gotLeaves.GetPointer(),
m_depth == d+1,
leavesRes.leafCount,
nodeGrid, nodeBlock, m_pStream), CreateChildNodes);
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_activeNodes.GetPointer(), m_newActiveNodes.GetPointer(), childCount * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(m_nodes_ContentCount.GetPointer(), m_newNodesContentCount.GetPointer(), childCount * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
DEVICE_SYNC_CHECK();
m_dthAsyncIntCopy.WaitForCopy();
// #ifndef FAST_COMPACTION
// m_dthAsyncByteCopy.WaitForCopy();
// #endif
eventCount = m_dthAsyncIntCopy[1] +
#ifndef FAST_COMPACTION
isSet(m_dthAsyncIntCopy[0]);// - ccLeft; //m_clipsMask.scannedMask[0][2 * count - 1] + isSet(m_clipsMask.mask[0][2 * count - 1]);
#else
m_dthAsyncIntCopy[0];
#endif
//static uint totalEventCount = 0;
//totalEventCount += 2 * count;
//__ct_printf("%d - %f \n", totalEventCount, eventCount / (float)(2 * count));
//__ct_printf("eventCount=%d %d %d\n", eventCount, m_dthAsyncIntCopy[0], m_dthAsyncIntCopy[1]);
// if(nodeCount > 15)
// {
// for(int a = 0; a < 1; ++a)
// {
// CTuint index = m_eventLines.toggleIndex;
// // PrintBuffer(m_eventLines.eventLines[a].indexedEvent[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].primId[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].ranges[index], eventCount);
// // PrintBuffer(m_eventLines.eventLines[a].type[index], eventCount);
// PrintBuffer((*m_eventLines.eventLines[a].nodeIndex).Get(index), eventCount, "\n");
// __ct_printf("\n\n");
// }
// printf("hel");
// }
// if(eventCount == 0)
// {
// __ct_printf("cuKDTreeScan: FATAL ERROR eventCount %d \n", eventCount);
// exit(0);
// }
// PrintBuffer(m_eventLines.eventLines[0].nodeIndex->Get(m_eventLines.toggleIndex), eventCount);
// PrintBuffer(m_eventLines.eventLines[0].mask, 2 * count);
// PrintBuffer(m_clipsMask.scannedMask[0], 2 * count);
//exit(0);
m_dthAsyncByteCopy.WaitForStream(m_stream);
m_dthAsyncByteCopy.StartCopy(m_gotLeaves.GetConstPointer(), 0);
eventBlock = EVENT_GROUP_SIZE;
eventGrid = nutty::cuda::GetCudaGrid(eventCount, eventBlock);
//merge into compact events?
KERNEL_PROFILE(cudaSetEventsBelongToLeafAndSetNodeIndex(
m_activeNodesIsLeaf.GetPointer() + nodeCount,
m_eventIsLeaf.GetPointer(),
m_nodes_NodeIdToLeafIndex.GetPointer() + g_childNodeOffset,
eventCount,
2 * nodeCount,
m_eventLines.toggleIndex,
eventGrid, eventBlock, m_pStream), 0cudaSetEventsBelongToLeafAndSetNodeIndex);
DEVICE_SYNC_CHECK();
//PROFILE_END;
//if(!m_dthAsyncByteCopy[0])
{
m_interiorContentScanner.Resize(childCount);
KERNEL_PROFILE(m_interiorContentScanner.ExcScan(m_nodes_ContentCount.Begin(), m_nodes_ContentCount.Begin() + childCount, nutty::PrefixSumOp<CTuint>(), m_pStream), 0m_interiorContentScanner);
DEVICE_SYNC_CHECK();
CUDA_RT_SAFE_CALLING_SYNC(cudaMemcpyAsync(
m_nodes_ContentStartAdd.GetPointer(), m_interiorContentScanner.GetPrefixSum().GetConstPointer(), childCount * sizeof(CTuint), cudaMemcpyDeviceToDevice, m_pStream));
//nutty::Copy(m_nodes_ContentStartAdd.Begin(), m_interiorContentScanner.GetPrefixSum().Begin(), m_interiorContentScanner.GetPrefixSum().Begin() + childCount);
DEVICE_SYNC_CHECK();
}
m_dthAsyncByteCopy.WaitForCopy();
DEVICE_SYNC_CHECK();
KERNEL_PROFILE(leavesRes = MakeLeaves(
m_activeNodesIsLeaf.Begin(),
g_childNodeOffset,
nodeCount,
childCount,
eventCount,
g_currentLeafCount + lastLeaves,
g_leafContentOffset, 1,
m_dthAsyncByteCopy[0]), MakeLeaves);
DEVICE_SYNC_CHECK();
eventCount = 2 * leavesRes.interiorPrimitiveCount;
}
else
{
//todo
for(CTuint i = 0; i < nodeCount; ++i)
{
m_nodes_IsLeaf.Insert(g_nodeOffset + i, (CTbyte)1);
}
__ct_printf("errr not good...\n");
}
g_entries += 2 * nodeCount;
eventSum += eventCount;
g_interiorNodesCountOnThisLevel = 2 * (nodeCount - lastLeaves) - leavesRes.leafCount;
g_currentInteriorNodesCount += g_interiorNodesCountOnThisLevel;
g_nodeOffset = g_childNodeOffset;
g_childNodeOffset += 2 * (nodeCount);
//update globals
g_leafContentOffset += leavesRes.leafPrimitiveCount;
g_currentLeafCount += lastLeaves + leavesRes.leafCount;
ct_printf(
"g_nodeOffset=%d g_childNodeOffset=%d g_leafContentOffset=%d g_interiorNodesCountOnThisLevel=%d g_currentInteriorNodesCount=%d g_currentLeafCount=%d\nCreated '%d' Leaves, Interior Nodes '%d'\n",
g_nodeOffset, g_childNodeOffset, g_leafContentOffset, g_interiorNodesCountOnThisLevel, g_currentInteriorNodesCount, g_currentLeafCount, lastLeaves + leavesRes.leafCount, g_interiorNodesCountOnThisLevel);
DEVICE_SYNC_CHECK();
if(!leavesRes.leafCount)
{
cudaMemcpyAsync(m_nodesBBox[0].GetPointer(), m_nodesBBox[1].GetConstPointer(), g_interiorNodesCountOnThisLevel * sizeof(BBox), cudaMemcpyDeviceToDevice, m_pStream);
//nutty::Copy(m_nodesBBox[0].Begin(), m_nodesBBox[1].Begin(), m_nodesBBox[1].Begin() + g_interiorNodesCountOnThisLevel);
}
if(eventCount == 0 || g_interiorNodesCountOnThisLevel == 0) //all nodes are leaf nodes
{
//primitiveCount = lastCnt;
break;
}
if(d < m_depth-1) //are we not done?
{
//check if we need more memory
if(eventCount > m_splits_Above.Size())
{
__ct_printf("need memory...\n");
GrowSplitMemory(2 * eventCount);
}
if(m_activeNodes.Size() < g_interiorNodesCountOnThisLevel + 2 * g_interiorNodesCountOnThisLevel)
{
__ct_printf("need memory...\n");
GrowPerLevelNodeMemory(4 * 2 * g_interiorNodesCountOnThisLevel);
}
if(m_nodes_IsLeaf.Size() < (g_childNodeOffset + 2 * g_interiorNodesCountOnThisLevel))
{
__ct_printf("need memory...\n");
GrowNodeMemory();
}
}
}
#ifdef PROFILE
static int frame = 0;
cudaDeviceSynchronize();
g_timer.Stop();
__ct_printf("Total: %f, Section: %f \n", g_timer.GetMillis(), g_time);
if(frame == PROFILE_FRAMES)
{
for(auto it = g_profileTable.begin(); it != g_profileTable.end(); ++it)
{
__ct_printf("%s\n", it->first.c_str());
}
for(auto it = g_profileTable.begin(); it != g_profileTable.end(); ++it)
{
__ct_printf("%f\n", it->second / (float)PROFILE_FRAMES);
}
g_profileTable.clear();
exit(0);
}
frame++;
#endif
m_interiorNodesCount = g_currentInteriorNodesCount;
m_leafNodesCount = g_currentLeafCount;
//CTuint allNodeCount = m_interiorNodesCount + m_leafNodesCount;
#ifdef _DEBUG
// CUDA_RT_SAFE_CALLING_NO_SYNC(cudaStreamSynchronize(m_pStream));
__ct_printf("%d\n", eventSum);
ct_printf("Tree Summary:\n");
PRINT_BUFFER(m_nodes_IsLeaf);
PRINT_BUFFER(m_nodes_Split);
PRINT_BUFFER(m_nodes_SplitAxis);
PRINT_BUFFER(m_nodes_LeftChild);
PRINT_BUFFER(m_nodes_RightChild);
PRINT_BUFFER(m_leafNodesContentCount);
PRINT_BUFFER(m_leafNodesContentStart);
PRINT_BUFFER(m_nodes_NodeIdToLeafIndex);
if(m_leafNodesContent.Size() < 1024)
{
PRINT_BUFFER(m_leafNodesContent);
}
else
{
ct_printf("skipping content '%d' elements...\n", m_leafNodesContent.Size());
}
#endif
DEVICE_SYNC_CHECK();
#ifdef _DEBUG
ValidateTree();
#endif
return CT_SUCCESS;
}
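// Debug-only breadth-first traversal that checks the child indices of every interior
// node and prints the node -> leaf index mapping for leaves.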
void cuKDTreeScan::ValidateTree(void)
{
std::queue<CTuint> queue;
queue.push(0);
while(!queue.empty())
{
CTuint node = queue.front();
queue.pop();
ct_printf("%d ", node);
if(!m_nodes_IsLeaf[node])
{
ct_printf("\n");
//the buffer accesses below assert if the node index is out of bounds
CTuint left = m_nodes_LeftChild[node];
CTuint right = m_nodes_RightChild[node];
if(left < node || right < node)
{
assert(0 && "fuck");
}
queue.push(left);
queue.push(right);
}
else
{
CTuint leafIndex = m_nodes_NodeIdToLeafIndex[node];
ct_printf(" - %d\n", leafIndex);
}
}
}
#else
CT_RESULT cuKDTreeScan::Update(void)
{
return CT_INVALID_OPERATION;
}
#endif |
a062b08cf8eeefab178e61300d59d8399c651d99.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <vector>
#include "random/rng.h"
#include "test_utils.h"
#include <cuda_utils.h>
#include "ml_utils.h"
#include "pca/pca.h"
#include <linalg/cublas_wrappers.h>
namespace ML {
using namespace MLCommon;
template<typename T>
struct PcaInputs {
T tolerance;
int len;
int n_row;
int n_col;
int len2;
int n_row2;
int n_col2;
unsigned long long int seed;
int algo;
bool whiten;
};
template<typename T>
::std::ostream& operator<<(::std::ostream& os, const PcaInputs<T>& dims) {
return os;
}
template<typename T>
class PcaTest: public ::testing::TestWithParam<PcaInputs<T> > {
protected:
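// Fits a PCA on a tiny hard-coded 3x2 matrix and checks transform /
// inverse-transform results against precomputed reference values.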
void basicTest() {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(data, len);
allocate(data_back, len);
allocate(trans_data, len);
allocate(trans_data_ref, len);
std::vector<T> data_h = { 1.0, 2.0, 5.0, 4.0, 2.0, 1.0 };
data_h.resize(len);
updateDevice(data, data_h.data(), len);
std::vector<T> trans_data_ref_h = { -2.3231, -0.3517, 2.6748, -0.3979, 0.6571, -0.2592 };
trans_data_ref_h.resize(len);
updateDevice(trans_data_ref, trans_data_ref_h.data(), len);
int len_comp = params.n_col * params.n_col;
allocate(components, len_comp);
allocate(explained_vars, params.n_col);
allocate(explained_var_ratio, params.n_col);
allocate(singular_vals, params.n_col);
allocate(mean, params.n_col);
allocate(noise_vars, 1);
std::vector<T> components_ref_h = { 0.8163, 0.5776, -0.5776, 0.8163 };
components_ref_h.resize(len_comp);
std::vector<T> explained_vars_ref_h = { 6.338, 0.3287 };
explained_vars_ref_h.resize(params.n_col);
allocate(components_ref, len_comp);
allocate(explained_vars_ref, params.n_col);
updateDevice(components_ref, components_ref_h.data(), len_comp);
updateDevice(explained_vars_ref, explained_vars_ref_h.data(), params.n_col);
paramsPCA prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
pcaFit(data, components, explained_vars, explained_var_ratio,
singular_vals, mean, noise_vars, prms, cublas_handle, cusolver_handle);
pcaTransform(data, components, trans_data, singular_vals, mean,
prms, cublas_handle);
pcaInverseTransform(trans_data, components, singular_vals, mean, data_back, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
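// Runs pcaFitTransform / pcaInverseTransform on larger random data; the solver
// (COV_EIG_DQ, COV_EIG_JACOBI or RANDOMIZED) is selected via params.algo, and the
// randomized solver uses 15 fewer components than columns.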
void advancedTest() {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len2;
paramsPCA prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
prms.whiten = params.whiten;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
else if (params.algo == 2) {
prms.algorithm = solver::RANDOMIZED;
prms.n_components = params.n_col2 - 15;
}
allocate(data2, len);
r.uniform(data2, len, T(-1.0), T(1.0));
allocate(data2_trans, prms.n_rows * prms.n_components);
int len_comp = params.n_col2 * prms.n_components;
allocate(components2, len_comp);
allocate(explained_vars2, prms.n_components);
allocate(explained_var_ratio2, prms.n_components);
allocate(singular_vals2, prms.n_components);
allocate(mean2, prms.n_cols);
allocate(noise_vars2, 1);
pcaFitTransform(data2, data2_trans, components2, explained_vars2, explained_var_ratio2,
singular_vals2, mean2, noise_vars2, prms, cublas_handle, cusolver_handle);
allocate(data2_back, len);
pcaInverseTransform(data2_trans, components2, singular_vals2, mean2, data2_back, prms, cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void SetUp() override {
basicTest();
advancedTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(components));
CUDA_CHECK(hipFree(trans_data));
CUDA_CHECK(hipFree(data_back));
CUDA_CHECK(hipFree(trans_data_ref));
CUDA_CHECK(hipFree(explained_vars));
CUDA_CHECK(hipFree(explained_var_ratio));
CUDA_CHECK(hipFree(singular_vals));
CUDA_CHECK(hipFree(mean));
CUDA_CHECK(hipFree(noise_vars));
CUDA_CHECK(hipFree(components_ref));
CUDA_CHECK(hipFree(explained_vars_ref));
CUDA_CHECK(hipFree(data2));
CUDA_CHECK(hipFree(data2_trans));
CUDA_CHECK(hipFree(data2_back));
CUDA_CHECK(hipFree(components2));
CUDA_CHECK(hipFree(explained_vars2));
CUDA_CHECK(hipFree(explained_var_ratio2));
CUDA_CHECK(hipFree(singular_vals2));
CUDA_CHECK(hipFree(mean2));
CUDA_CHECK(hipFree(noise_vars2));
}
protected:
PcaInputs<T> params;
T *data, *trans_data, *data_back, *components, *explained_vars, *explained_var_ratio, *singular_vals,
*mean, *noise_vars, *trans_data_ref, *components_ref, *explained_vars_ref;
T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2,
*singular_vals2, *mean2, *noise_vars2;
};
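// Test parameter sets: tolerance, small-matrix size (len/n_row/n_col), large-matrix
// size (len2/n_row2/n_col2), RNG seed, solver selection and whitening flag for the
// float and double instantiations.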
const std::vector<PcaInputs<float> > inputsf2 = {
{ 0.01f, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0, false },
{ 0.01f, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1, true },
{ 0.05f, 3 * 2, 3, 2, 256 * 64, 256, 64, 1234ULL, 2, true }};
const std::vector<PcaInputs<double> > inputsd2 = {
{ 0.01, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0, false },
{ 0.01, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1, true },
{ 0.05, 3 * 2, 3, 2, 256 * 64, 256, 64, 1234ULL, 2, true }};
typedef PcaTest<float> PcaTestValF;
TEST_P(PcaTestValF, Result) {
ASSERT_TRUE(
devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestValD;
TEST_P(PcaTestValD, Result) {
ASSERT_TRUE(
devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestLeftVecF;
TEST_P(PcaTestLeftVecF, Result) {
ASSERT_TRUE(
devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestLeftVecD;
TEST_P(PcaTestLeftVecD, Result) {
ASSERT_TRUE(
devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestTransDataF;
TEST_P(PcaTestTransDataF, Result) {
ASSERT_TRUE(
devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestTransDataD;
TEST_P(PcaTestTransDataD, Result) {
ASSERT_TRUE(
devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecSmallF;
TEST_P(PcaTestDataVecSmallF, Result) {
ASSERT_TRUE(
devArrMatch(data, data_back,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecSmallD;
TEST_P(PcaTestDataVecSmallD, Result) {
ASSERT_TRUE(
devArrMatch(data, data_back,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecF;
TEST_P(PcaTestDataVecF, Result) {
ASSERT_TRUE(
devArrMatch(data2, data2_back,
(params.n_col2 * params.n_col2),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecD;
TEST_P(PcaTestDataVecD, Result) {
ASSERT_TRUE(
devArrMatch(data2, data2_back,
(params.n_col2 * params.n_col2),
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
| a062b08cf8eeefab178e61300d59d8399c651d99.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <vector>
#include "random/rng.h"
#include "test_utils.h"
#include <cuda_utils.h>
#include "ml_utils.h"
#include "pca/pca.h"
#include <linalg/cublas_wrappers.h>
namespace ML {
using namespace MLCommon;
template<typename T>
struct PcaInputs {
T tolerance;
int len;
int n_row;
int n_col;
int len2;
int n_row2;
int n_col2;
unsigned long long int seed;
int algo;
bool whiten;
};
template<typename T>
::std::ostream& operator<<(::std::ostream& os, const PcaInputs<T>& dims) {
return os;
}
template<typename T>
class PcaTest: public ::testing::TestWithParam<PcaInputs<T> > {
protected:
void basicTest() {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(data, len);
allocate(data_back, len);
allocate(trans_data, len);
allocate(trans_data_ref, len);
std::vector<T> data_h = { 1.0, 2.0, 5.0, 4.0, 2.0, 1.0 };
data_h.resize(len);
updateDevice(data, data_h.data(), len);
std::vector<T> trans_data_ref_h = { -2.3231, -0.3517, 2.6748, -0.3979, 0.6571, -0.2592 };
trans_data_ref_h.resize(len);
updateDevice(trans_data_ref, trans_data_ref_h.data(), len);
int len_comp = params.n_col * params.n_col;
allocate(components, len_comp);
allocate(explained_vars, params.n_col);
allocate(explained_var_ratio, params.n_col);
allocate(singular_vals, params.n_col);
allocate(mean, params.n_col);
allocate(noise_vars, 1);
std::vector<T> components_ref_h = { 0.8163, 0.5776, -0.5776, 0.8163 };
components_ref_h.resize(len_comp);
std::vector<T> explained_vars_ref_h = { 6.338, 0.3287 };
explained_vars_ref_h.resize(params.n_col);
allocate(components_ref, len_comp);
allocate(explained_vars_ref, params.n_col);
updateDevice(components_ref, components_ref_h.data(), len_comp);
updateDevice(explained_vars_ref, explained_vars_ref_h.data(), params.n_col);
paramsPCA prms;
prms.n_cols = params.n_col;
prms.n_rows = params.n_row;
prms.n_components = params.n_col;
prms.whiten = false;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else
prms.algorithm = solver::COV_EIG_JACOBI;
pcaFit(data, components, explained_vars, explained_var_ratio,
singular_vals, mean, noise_vars, prms, cublas_handle, cusolver_handle);
pcaTransform(data, components, trans_data, singular_vals, mean,
prms, cublas_handle);
pcaInverseTransform(trans_data, components, singular_vals, mean, data_back, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void advancedTest() {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
params = ::testing::TestWithParam<PcaInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len2;
paramsPCA prms;
prms.n_cols = params.n_col2;
prms.n_rows = params.n_row2;
prms.n_components = params.n_col2;
prms.whiten = params.whiten;
if (params.algo == 0)
prms.algorithm = solver::COV_EIG_DQ;
else if (params.algo == 1)
prms.algorithm = solver::COV_EIG_JACOBI;
else if (params.algo == 2) {
prms.algorithm = solver::RANDOMIZED;
prms.n_components = params.n_col2 - 15;
}
allocate(data2, len);
r.uniform(data2, len, T(-1.0), T(1.0));
allocate(data2_trans, prms.n_rows * prms.n_components);
int len_comp = params.n_col2 * prms.n_components;
allocate(components2, len_comp);
allocate(explained_vars2, prms.n_components);
allocate(explained_var_ratio2, prms.n_components);
allocate(singular_vals2, prms.n_components);
allocate(mean2, prms.n_cols);
allocate(noise_vars2, 1);
pcaFitTransform(data2, data2_trans, components2, explained_vars2, explained_var_ratio2,
singular_vals2, mean2, noise_vars2, prms, cublas_handle, cusolver_handle);
allocate(data2_back, len);
pcaInverseTransform(data2_trans, components2, singular_vals2, mean2, data2_back, prms, cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void SetUp() override {
basicTest();
advancedTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(components));
CUDA_CHECK(cudaFree(trans_data));
CUDA_CHECK(cudaFree(data_back));
CUDA_CHECK(cudaFree(trans_data_ref));
CUDA_CHECK(cudaFree(explained_vars));
CUDA_CHECK(cudaFree(explained_var_ratio));
CUDA_CHECK(cudaFree(singular_vals));
CUDA_CHECK(cudaFree(mean));
CUDA_CHECK(cudaFree(noise_vars));
CUDA_CHECK(cudaFree(components_ref));
CUDA_CHECK(cudaFree(explained_vars_ref));
CUDA_CHECK(cudaFree(data2));
CUDA_CHECK(cudaFree(data2_trans));
CUDA_CHECK(cudaFree(data2_back));
CUDA_CHECK(cudaFree(components2));
CUDA_CHECK(cudaFree(explained_vars2));
CUDA_CHECK(cudaFree(explained_var_ratio2));
CUDA_CHECK(cudaFree(singular_vals2));
CUDA_CHECK(cudaFree(mean2));
CUDA_CHECK(cudaFree(noise_vars2));
}
protected:
PcaInputs<T> params;
T *data, *trans_data, *data_back, *components, *explained_vars, *explained_var_ratio, *singular_vals,
*mean, *noise_vars, *trans_data_ref, *components_ref, *explained_vars_ref;
T *data2, *data2_trans, *data2_back, *components2, *explained_vars2, *explained_var_ratio2,
*singular_vals2, *mean2, *noise_vars2;
};
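// Initializer order: { tolerance, len, n_row, n_col, len2, n_row2, n_col2, seed, algo, whiten }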
const std::vector<PcaInputs<float> > inputsf2 = {
{ 0.01f, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0, false },
{ 0.01f, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1, true },
{ 0.05f, 3 * 2, 3, 2, 256 * 64, 256, 64, 1234ULL, 2, true }};
const std::vector<PcaInputs<double> > inputsd2 = {
{ 0.01, 3 * 2, 3, 2, 1024 * 128, 1024, 128, 1234ULL, 0, false },
{ 0.01, 3 * 2, 3, 2, 256 * 32, 256, 32, 1234ULL, 1, true },
{ 0.05, 3 * 2, 3, 2, 256 * 64, 256, 64, 1234ULL, 2, true }};
typedef PcaTest<float> PcaTestValF;
TEST_P(PcaTestValF, Result) {
ASSERT_TRUE(
devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestValD;
TEST_P(PcaTestValD, Result) {
ASSERT_TRUE(
devArrMatch(explained_vars, explained_vars_ref, params.n_col,
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestLeftVecF;
TEST_P(PcaTestLeftVecF, Result) {
ASSERT_TRUE(
devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestLeftVecD;
TEST_P(PcaTestLeftVecD, Result) {
ASSERT_TRUE(
devArrMatch(components, components_ref,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestTransDataF;
TEST_P(PcaTestTransDataF, Result) {
ASSERT_TRUE(
devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestTransDataD;
TEST_P(PcaTestTransDataD, Result) {
ASSERT_TRUE(
devArrMatch(trans_data, trans_data_ref,
(params.n_row * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecSmallF;
TEST_P(PcaTestDataVecSmallF, Result) {
ASSERT_TRUE(
devArrMatch(data, data_back,
(params.n_col * params.n_col),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecSmallD;
TEST_P(PcaTestDataVecSmallD, Result) {
ASSERT_TRUE(
devArrMatch(data, data_back,
(params.n_col * params.n_col),
CompareApproxAbs<double>(params.tolerance)));
}
typedef PcaTest<float> PcaTestDataVecF;
TEST_P(PcaTestDataVecF, Result) {
ASSERT_TRUE(
devArrMatch(data2, data2_back,
(params.n_col2 * params.n_col2),
CompareApproxAbs<float>(params.tolerance)));
}
typedef PcaTest<double> PcaTestDataVecD;
TEST_P(PcaTestDataVecD, Result) {
ASSERT_TRUE(
devArrMatch(data2, data2_back,
(params.n_col2 * params.n_col2),
CompareApproxAbs<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestValD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestLeftVecD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecSmallD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestTransDataD, ::testing::ValuesIn(inputsd2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(PcaTests, PcaTestDataVecD, ::testing::ValuesIn(inputsd2));
} // end namespace ML
|
5ef79d80974ecb051d5373ac00b64cdb38f6f7cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/distributions/uniform_kernel.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace oneflow {
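// Fills `keys` with random 32-bit integers (one generator state per element) and
// `values` with the identity indices 0..n-1; sorting the pairs by key afterwards
// yields a uniformly random permutation in `values`.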
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
hiprandState_t* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = hiprand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeAutoGenerator());
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<UniformKernelState>(generator);
}
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
if (n == 0) { return; }
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* randperm_kernel_state = dynamic_cast<UniformKernelState*>(state);
CHECK_NOTNULL(randperm_kernel_state);
const auto& generator = randperm_kernel_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
hiprandState_t* curand_states = gpu_generator->curand_states();
// tmp layout: | keys (in and out, 2 x N) | values (N) | workspace for the radix sort |
// The values are the desired indices; the keys are generated randomly.
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
hipLaunchKernelGGL(( GeneKeysAndValues), dim3(block_num), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
n, value_base, key_base, curand_states);
auto err = hipcub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
| 5ef79d80974ecb051d5373ac00b64cdb38f6f7cf.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/user/kernels/op_kernel_state_wrapper.h"
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/device/device_context.h"
#include "oneflow/core/framework/random_generator.h"
#include "oneflow/user/kernels/range_kernel_util.h"
#include "oneflow/user/kernels/distributions/uniform_kernel.h"
#include "oneflow/user/kernels/radix_sort.cuh"
#include <curand.h>
#include <curand_kernel.h>
namespace oneflow {
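// Fills `keys` with random 32-bit integers (one generator state per element) and
// `values` with the identity indices 0..n-1; sorting the pairs by key afterwards
// yields a uniformly random permutation in `values`.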
__global__ void GeneKeysAndValues(const int32_t n, int32_t* values, int32_t* keys,
curandState* state) {
XPU_1D_KERNEL_LOOP(i, n) {
keys[i] = curand(state + i);
values[i] = i;
}
}
class GpuRandPermKernel final : public user_op::OpKernel {
public:
GpuRandPermKernel() = default;
~GpuRandPermKernel() = default;
std::shared_ptr<user_op::OpKernelState> CreateOpKernelState(
user_op::KernelInitContext* ctx) const override {
const auto& generator = CHECK_JUST(one::MakeAutoGenerator());
generator->set_current_seed(ctx->Attr<int64_t>("seed"));
return std::make_shared<UniformKernelState>(generator);
}
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx, user_op::OpKernelState* state) const override {
user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
int32_t* output = out->mut_dptr<int32_t>();
const int32_t n = ctx->Attr<int32_t>("n");
if (n == 0) { return; }
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
auto* randperm_kernel_state = dynamic_cast<UniformKernelState*>(state);
CHECK_NOTNULL(randperm_kernel_state);
const auto& generator = randperm_kernel_state->generator();
const auto& gpu_generator = CHECK_JUST(generator->Get<one::CUDAGeneratorImpl>());
CHECK_NOTNULL(generator);
int32_t block_num = gpu_generator->max_block_num();
int32_t thread_num = gpu_generator->max_thread_num();
curandState* curand_states = gpu_generator->curand_states();
// tmp layout: | keys (in and out, 2 x N) | values (N) | workspace for the radix sort |
// The values are the desired indices; the keys are generated randomly.
void* tmp = tmp_buffer->mut_dptr<void>();
int32_t* key_base = reinterpret_cast<int32_t*>(tmp);
const int32_t key_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
int32_t* value_base =
reinterpret_cast<int32_t*>(reinterpret_cast<char*>(key_base) + 2 * key_aligned_bytes);
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
void* tmp_base =
reinterpret_cast<void*>(reinterpret_cast<char*>(value_base) + indices_aligned_bytes);
size_t temp_storage_bytes = InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
GeneKeysAndValues<<<block_num, kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
n, value_base, key_base, curand_states);
auto err = cub::DeviceRadixSort::SortPairs(
/* d_temp_storage */ tmp_base,
/* temp_storage_bytes */ temp_storage_bytes,
/* d_keys_in */ key_base,
/* d_keys_out */ key_base + n,
/* d_values_in */ value_base,
/* d_values_out */ output,
/* num_items */ n,
/* begin_bit */ 0,
/* end_bit */ sizeof(int32_t) * 8,
/* stream */ ctx->device_ctx()->cuda_stream());
OF_CUDA_CHECK(err);
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("randperm")
.SetCreateFn<GpuRandPermKernel>()
.SetIsMatchedHob(user_op::HobDeviceTag() == "gpu")
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int32_t n = ctx->Attr<int32_t>("n");
/* Sorted In */
const int32_t sorted_in_aligned_bytes = 2 * GetCudaAlignedSize(n * sizeof(int32_t));
/* Indices */
const int32_t indices_aligned_bytes = GetCudaAlignedSize(n * sizeof(int32_t));
/* CUB Temp Storage */
const int32_t temp_storage_bytes =
InferTempStorageForSortPairsDescending<int32_t, int32_t>(1, n);
return sorted_in_aligned_bytes + indices_aligned_bytes + temp_storage_bytes;
});
} // namespace oneflow
|
4eb7ac71339c9e5a4359b5b56c6d1f6b383453ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../include/cuda_ds/CountingHashTable/_CountingHashTable.h"
#include <iostream>
#include <stdexcept>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
namespace cuda_ds {
namespace internal {
/**
* Determines the first free position i in the hash table with i >= l.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline uint32_t firstFreePosition(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
const uint32_t l, const uint32_t r, uint32_t* shared) {
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
const uint32_t tId = threadIdx.x;
// Shared Memory and views on it.
// This is the same physical memory as defined in compressKernel!
uint32_t* leftPart = &shared[0];
volatile uint32_t *result = &shared[2 * blocksize];
if (tId == 0)
*result = (uint32_t) -1;
__syncthreads();
uint32_t i = l;
while (i < r) {
uint32_t j = i % entriesPerBlock;
if(j == 0) {
// read the next stripe of hash table from global memory
const uint32_t* startAddr = (uint32_t*) (table + i);
const uint32_t* stopAddr = (uint32_t*) min((uint64_t) (table + i + entriesPerBlock), (uint64_t) (table + numEntries));
const uint32_t* addr = startAddr + tId;
if(addr < stopAddr) {
leftPart[tId] = *addr;
//printf("thread %u: i=%u loading memory: %u\n", tId, i, leftPart[tId]);
}
__syncthreads();
}
// linear scan of the part of the hash table
for (; j < entriesPerBlock && i < r && (*result) == (uint32_t) -1;
j++, i++) {
// only one thread participates
if (tId == 0) {
// the first free position is returned
if (leftPart[j * intsPerEntry + intsPerKey] == 0)
*result = i;
}
__syncthreads();
}
if (*result != (uint32_t) -1)
return *result;
}
// no free position found
return r;
}
/**
* Determines the last free position i in the hash table with i <= r.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline uint32_t lastOccupiedPosition(KeyValuePair<intsPerKey>* table, const uint32_t numEntries, const uint32_t l,
const uint32_t r, uint32_t* shared) {
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
const uint32_t tId = threadIdx.x;
// Views on the shared memory
volatile uint32_t* rightPart = &shared[blocksize];
volatile uint32_t *result = &shared[2 * blocksize + 1];
if (tId == 0)
*result = (uint32_t) -1;
__syncthreads();
// the position from the end of the table
uint32_t i = numEntries - r - 1;
// Run to front
while (numEntries-i-1 > l) {
uint32_t j = i % entriesPerBlock;
if(j == 0) {
const uint32_t numEntriesLoaded = min(entriesPerBlock, numEntries-i);
const uint32_t* startAddr = (uint32_t*) (table + numEntries - i - numEntriesLoaded);
const uint32_t* stopAddr = (uint32_t*) (table + numEntries - i);
const uint32_t* addr = (uint32_t*) (table + numEntries - i - entriesPerBlock) + tId;
// read the next stripe of hash table from global memory
if(addr >= startAddr && addr < stopAddr) {
rightPart[tId] = *addr;
//printf("thread %u: i=%u loading memory: %u\n", tId, i, rightPart[tId]);
}
__syncthreads();
}
// linear scan of the part of the hash table
for (; j < entriesPerBlock && numEntries-i-1 > l && *result == (uint32_t) -1;
j++, i++) {
// only one thread participates
if (tId == 0) {
// the last occupied position is returned
if (rightPart[(entriesPerBlock - j - 1) * intsPerEntry + intsPerKey] != 0)
*result = numEntries - i -1;
}
__syncthreads();
}
if (*result != (uint32_t) -1) {
//printf("r is %u\n", *result);
return *result;
}
}
// no free position found
return l;
}
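// Swaps table entries l and r: reads both entries from the stripes already cached
// in shared memory and writes the swapped values back to global memory, with the
// first intsPerEntry threads each handling one int of an entry.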
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline void my_swap(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
const uint32_t l, const uint32_t r, uint32_t* shared) {
const uint32_t tId = threadIdx.x;
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
uint32_t* tableInts = (uint32_t*) table;
//if(tId == 0)
// printf("swap %u %u\n", l, r);
// Views on the shared memory
uint32_t* leftPart = &shared[0];
uint32_t* rightPart = &shared[blocksize];
if (tId < intsPerEntry) {
// swap in global memory
const uint32_t x = numEntries - r - 1;
const uint32_t pos = entriesPerBlock - (x % entriesPerBlock) - 1;
tableInts[intsPerEntry * l + tId] = rightPart[pos * intsPerEntry + tId];
tableInts[intsPerEntry * r + tId] = leftPart[(l % entriesPerBlock) * intsPerEntry
+ tId];
}
__syncthreads();
}
/**
* Compresses the table content to have nonzero entries at the beginning of the table.
* The number of nonzero entries is stored in *res.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__global__ void compressKernel(KeyValuePair<intsPerKey>* table,
const uint32_t numEntries, uint32_t* res) {
// prepare shared memory to hold parts of the hash table containing l and r
__shared__ uint32_t shared[2 * blocksize + 2];
// init left and right marker
uint32_t l = firstFreePosition<intsPerKey, blocksize>(table, numEntries, 0, numEntries - 1, shared); // l points to first zero position
uint32_t r = lastOccupiedPosition<intsPerKey, blocksize>(table, numEntries, 0, numEntries - 1, shared); // r points to last nonzero position
/* invariant:
* l points to first zero position.
* r points to last nonzero position.
*/
while (l < r) {
//printf("l=%u, r=%u\n", l, r);
// swap table[l] with table[r])
my_swap<intsPerKey, blocksize>(table, numEntries, l, r, shared);
// repair invariant
l = firstFreePosition<intsPerKey, blocksize>(table, numEntries, l+1, r, shared);
r = lastOccupiedPosition<intsPerKey, blocksize>(table, numEntries, l, r-1, shared);
}
//printf("l=%u, r=%u\n", l, r);
*res = l;
}
template<uint32_t intsPerKey>
void _compress(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
uint32_t* result) {
const uint32_t blocksize = 1024;
// launch kernel
hipLaunchKernelGGL(( compressKernel<intsPerKey, blocksize>) , dim3(1), dim3(blocksize), 0, 0, table, numEntries,
result);
}
/**
* Export Templates for meaningful ints per key.
*/
#define EXPORT(x) \
template void _compress<x>(KeyValuePair<x>*, const uint32_t, uint32_t*);
EXPORT(1)
EXPORT(2)
EXPORT(3)
EXPORT(4)
EXPORT(5)
EXPORT(6)
}
}
| 4eb7ac71339c9e5a4359b5b56c6d1f6b383453ea.cu | #include "../../include/cuda_ds/CountingHashTable/_CountingHashTable.h"
#include <iostream>
#include <stdexcept>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
namespace cuda_ds {
namespace internal {
/**
* Determines the first free position i in the hash table with i >= l.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline uint32_t firstFreePosition(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
const uint32_t l, const uint32_t r, uint32_t* shared) {
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
const uint32_t tId = threadIdx.x;
// Shared Memory and views on it.
// This is the same physical memory as defined in compressKernel!
uint32_t* leftPart = &shared[0];
volatile uint32_t *result = &shared[2 * blocksize];
if (tId == 0)
*result = (uint32_t) -1;
__syncthreads();
uint32_t i = l;
while (i < r) {
uint32_t j = i % entriesPerBlock;
if(j == 0) {
// read the next stripe of hash table from global memory
const uint32_t* startAddr = (uint32_t*) (table + i);
const uint32_t* stopAddr = (uint32_t*) min((uint64_t) (table + i + entriesPerBlock), (uint64_t) (table + numEntries));
const uint32_t* addr = startAddr + tId;
if(addr < stopAddr) {
leftPart[tId] = *addr;
//printf("thread %u: i=%u loading memory: %u\n", tId, i, leftPart[tId]);
}
__syncthreads();
}
// linear scan of the part of the hash table
for (; j < entriesPerBlock && i < r && (*result) == (uint32_t) -1;
j++, i++) {
// only one thread participates
if (tId == 0) {
// the first free position is returned
if (leftPart[j * intsPerEntry + intsPerKey] == 0)
*result = i;
}
__syncthreads();
}
if (*result != (uint32_t) -1)
return *result;
}
// no free position found
return r;
}
/**
* Determines the last free position i in the hash table with i <= r.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline uint32_t lastOccupiedPosition(KeyValuePair<intsPerKey>* table, const uint32_t numEntries, const uint32_t l,
const uint32_t r, uint32_t* shared) {
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
const uint32_t tId = threadIdx.x;
// Views on the shared memory
volatile uint32_t* rightPart = &shared[blocksize];
volatile uint32_t *result = &shared[2 * blocksize + 1];
if (tId == 0)
*result = (uint32_t) -1;
__syncthreads();
// the position from the end of the table
uint32_t i = numEntries - r - 1;
// Run to front
while (numEntries-i-1 > l) {
uint32_t j = i % entriesPerBlock;
if(j == 0) {
const uint32_t numEntriesLoaded = min(entriesPerBlock, numEntries-i);
const uint32_t* startAddr = (uint32_t*) (table + numEntries - i - numEntriesLoaded);
const uint32_t* stopAddr = (uint32_t*) (table + numEntries - i);
const uint32_t* addr = (uint32_t*) (table + numEntries - i - entriesPerBlock) + tId;
// read the next stripe of hash table from global memory
if(addr >= startAddr && addr < stopAddr) {
rightPart[tId] = *addr;
//printf("thread %u: i=%u loading memory: %u\n", tId, i, rightPart[tId]);
}
__syncthreads();
}
// linear scan of the part of the hash table
for (; j < entriesPerBlock && numEntries-i-1 > l && *result == (uint32_t) -1;
j++, i++) {
// only one thread participates
if (tId == 0) {
// the last occupied position is returned
if (rightPart[(entriesPerBlock - j - 1) * intsPerEntry + intsPerKey] != 0)
*result = numEntries - i -1;
}
__syncthreads();
}
if (*result != (uint32_t) -1) {
//printf("r is %u\n", *result);
return *result;
}
}
// no free position found
return l;
}
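// Swaps table entries l and r: reads both entries from the stripes already cached
// in shared memory and writes the swapped values back to global memory, with the
// first intsPerEntry threads each handling one int of an entry.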
template<uint32_t intsPerKey, uint32_t blocksize>
__device__ inline void my_swap(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
const uint32_t l, const uint32_t r, uint32_t* shared) {
const uint32_t tId = threadIdx.x;
const uint32_t intsPerEntry = intsPerKey + 1;
const uint32_t entriesPerBlock = blocksize / intsPerEntry;
uint32_t* tableInts = (uint32_t*) table;
//if(tId == 0)
// printf("swap %u %u\n", l, r);
// Views on the shared memory
uint32_t* leftPart = &shared[0];
uint32_t* rightPart = &shared[blocksize];
if (tId < intsPerEntry) {
// swap in global memory
const uint32_t x = numEntries - r - 1;
const uint32_t pos = entriesPerBlock - (x % entriesPerBlock) - 1;
tableInts[intsPerEntry * l + tId] = rightPart[pos * intsPerEntry + tId];
tableInts[intsPerEntry * r + tId] = leftPart[(l % entriesPerBlock) * intsPerEntry
+ tId];
}
__syncthreads();
}
/**
* Compresses the table content to have nonzero entries at the beginning of the table.
* The number of nonzero entries is stored in *res.
*/
template<uint32_t intsPerKey, uint32_t blocksize>
__global__ void compressKernel(KeyValuePair<intsPerKey>* table,
const uint32_t numEntries, uint32_t* res) {
// prepare shared memory to hold parts of the hash table containing l and r
__shared__ uint32_t shared[2 * blocksize + 2];
// init left and right marker
uint32_t l = firstFreePosition<intsPerKey, blocksize>(table, numEntries, 0, numEntries - 1, shared); // l points to first zero position
uint32_t r = lastOccupiedPosition<intsPerKey, blocksize>(table, numEntries, 0, numEntries - 1, shared); // r points to last nonzero position
/* invariant:
* l points to first zero position.
* r points to last nonzero position.
*/
while (l < r) {
//printf("l=%u, r=%u\n", l, r);
// swap table[l] with table[r])
my_swap<intsPerKey, blocksize>(table, numEntries, l, r, shared);
// repair invariant
l = firstFreePosition<intsPerKey, blocksize>(table, numEntries, l+1, r, shared);
r = lastOccupiedPosition<intsPerKey, blocksize>(table, numEntries, l, r-1, shared);
}
//printf("l=%u, r=%u\n", l, r);
*res = l;
}
template<uint32_t intsPerKey>
void _compress(KeyValuePair<intsPerKey>* table, const uint32_t numEntries,
uint32_t* result) {
const uint32_t blocksize = 1024;
// launch kernel
compressKernel<intsPerKey, blocksize> <<<1, blocksize>>>(table, numEntries,
result);
}
/**
* Export Templates for meaningful ints per key.
*/
#define EXPORT(x) \
template void _compress<x>(KeyValuePair<x>*, const uint32_t, uint32_t*);
EXPORT(1)
EXPORT(2)
EXPORT(3)
EXPORT(4)
EXPORT(5)
EXPORT(6)
}
}
|
28f91bd75065c3ef68c5e7ae3fb17f0aabd34cac.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_vector_types.h>
#include "bitmap_image.hpp"
using namespace std;
__global__ void color_to_grey(uchar3 *input_image, uchar3 *output_image, int width, int height)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(col < width && row < height)
{
int pos = row * width + col;
// Rec. 709 luma weights; compute the grey level once and write it to all three channels.
unsigned char grey = static_cast<unsigned char>(input_image[pos].x * 0.2126f + input_image[pos].y * 0.7152f + input_image[pos].z * 0.0722f);
output_image[pos].x = grey;
output_image[pos].y = grey;
output_image[pos].z = grey;
}
}
int main()
{
bitmap_image bmp("lenna.bmp");
if(!bmp)
{
cerr << "Image not found" << endl;
exit(1);
}
int height = bmp.height();
int width = bmp.width();
cout << "image dimensions" << endl;
cout << "height " << height << " width " << width << endl;
//Transform image into vector of doubles
vector<uchar3> input_image;
rgb_t color;
for(int x = 0; x < width; x++)
{
for(int y = 0; y < height; y++)
{
bmp.get_pixel(x, y, color);
input_image.push_back( {color.red, color.green, color.blue} );
}
}
vector<uchar3> output_image(input_image.size());
uchar3 *d_in, *d_out;
int img_size = (input_image.size() * sizeof(char) * 3);
hipMalloc(&d_in, img_size);
hipMalloc(&d_out, img_size);
hipMemcpy(d_in, input_image.data(), img_size, hipMemcpyHostToDevice);
hipMemcpy(d_out, input_image.data(), img_size, hipMemcpyHostToDevice);
dim3 dimGrid((width + 15) / 16, (height + 15) / 16, 1); // round up so edge pixels are covered when dimensions are not multiples of 16
dim3 dimBlock(16, 16, 1);
hipLaunchKernelGGL(( color_to_grey), dim3(dimGrid) , dim3(dimBlock) , 0, 0, d_in, d_out, width, height);
hipMemcpy(output_image.data(), d_out, img_size, hipMemcpyDeviceToHost);
//Set updated pixels
for(int x = 0; x < width; x++)
{
for(int y = 0; y < height; y++)
{
int pos = x * height + y; // match the x-major packing order used when building input_image
bmp.set_pixel(x, y, output_image[pos].x, output_image[pos].y, output_image[pos].z);
}
}
bmp.save_image("./grayscaled.bmp");
hipFree(d_in);
hipFree(d_out);
} | 28f91bd75065c3ef68c5e7ae3fb17f0aabd34cac.cu | #include <iostream>
#include <vector>
#include <cuda.h>
#include <vector_types.h>
#include "bitmap_image.hpp"
using namespace std;
__global__ void color_to_grey(uchar3 *input_image, uchar3 *output_image, int width, int height)
{
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(col < width && row < height)
{
int pos = row * width + col;
// Rec. 709 luma weights; compute the grey level once and write it to all three channels.
unsigned char grey = static_cast<unsigned char>(input_image[pos].x * 0.2126f + input_image[pos].y * 0.7152f + input_image[pos].z * 0.0722f);
output_image[pos].x = grey;
output_image[pos].y = grey;
output_image[pos].z = grey;
}
}
int main()
{
bitmap_image bmp("lenna.bmp");
if(!bmp)
{
cerr << "Image not found" << endl;
exit(1);
}
int height = bmp.height();
int width = bmp.width();
cout << "image dimensions" << endl;
cout << "height " << height << " width " << width << endl;
//Transform image into vector of doubles
vector<uchar3> input_image;
rgb_t color;
for(int x = 0; x < width; x++)
{
for(int y = 0; y < height; y++)
{
bmp.get_pixel(x, y, color);
input_image.push_back( {color.red, color.green, color.blue} );
}
}
vector<uchar3> output_image(input_image.size());
uchar3 *d_in, *d_out;
int img_size = (input_image.size() * sizeof(char) * 3);
cudaMalloc(&d_in, img_size);
cudaMalloc(&d_out, img_size);
cudaMemcpy(d_in, input_image.data(), img_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_out, input_image.data(), img_size, cudaMemcpyHostToDevice);
dim3 dimGrid((width + 15) / 16, (height + 15) / 16, 1); // round up so edge pixels are covered when dimensions are not multiples of 16
dim3 dimBlock(16, 16, 1);
color_to_grey<<< dimGrid , dimBlock >>> (d_in, d_out, width, height);
cudaMemcpy(output_image.data(), d_out, img_size, cudaMemcpyDeviceToHost);
//Set updated pixels
for(int x = 0; x < width; x++)
{
for(int y = 0; y < height; y++)
{
int pos = x * height + y; // match the x-major packing order used when building input_image
bmp.set_pixel(x, y, output_image[pos].x, output_image[pos].y, output_image[pos].z);
}
}
bmp.save_image("./grayscaled.bmp");
cudaFree(d_in);
cudaFree(d_out);
} |
32c7509047d6101ab5749da880fc923129dfcdea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#define M 41
using namespace std;
typedef vector<vector<float>> matrix;
__global__ void initial(float *u,float *v,float *p,float *b,int nx,int ny){
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
u[ny*i+j]=0.0;
v[ny*i+j]=0.0;
p[ny*i+j]=0.0;
b[ny*i+j]=0.0;
}
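// Builds the source term b of the pressure Poisson equation on the interior
// points from the current velocity field (central differences in space, 1/dt divergence term).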
__global__ void build_up_b(float *b,float rho,float dt,float *u,float *v,float dx,float dy,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
b[ny*i+j]=(rho*(1.0/dt*
((u[i*ny+j+1]-u[i*ny+j-1])/
(2*dx)+(v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))-
((u[i*ny+j+1]-u[i*ny+j-1])/(2*dx))*((u[i*ny+j+1]-u[i*ny+j-1])/(2*dx))-
2*((u[(i+1)*ny+j]-u[(i-1)*ny+j])/(2*dy)*
(v[i*ny+j+1]-v[i*ny+j-1])/(2*dx))-
((v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))*((v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))
)
);
}
__syncthreads();
}
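// One Jacobi sweep of the pressure Poisson equation on the interior points,
// reading the previous iterate pn and writing the new pressure into p.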
__global__ void pressure_poisson(float *p,float dx,float dy,float *b,int nx,int ny,float *pn) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
p[ny*i+j]=(((pn[i*ny+j+1] + pn[i*ny+j-1])*dy*dy+
(pn[(i+1)*ny+j] + pn[(i-1)*ny+j])*dx*dx)/
(2*(dx*dx+dy*dy))-
dx*dx*dy*dy/(2*(dx*dx+dy*dy))*b[i*ny+j]
);
}
__syncthreads();
}
__global__ void pressure_poisson_2(float *p,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(j==ny-1){
p[ny*i+j]=p[ny*i+j-1];
}
if(i==0){
p[ny*i+j]=p[(i+1)*ny+j];
}
if(j==0){
p[ny*i+j]=p[i*ny+j+1];
}
if(i==nx-1){
p[ny*i+j] = 0.0;
}
__syncthreads();
}
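// Advances the interior velocities u and v by one time step: backward differences
// for the advection terms, central differences for the pressure gradient and diffusion.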
__global__ void cavity_flow(int nt,float *u,float *v,float dt,float dx,float dy,float*p,float rho,float nu,int nx,int ny,float *un,float *vn) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
u[i*ny+j]=(un[i*ny+j]-
un[i*ny+j]*dt/dx*
(un[i*ny+j]-un[i*ny+j-1])-
vn[i*ny+j]*dt/dy*
(un[i*ny+j]-un[(i-1)*ny+j])-
dt/(2*rho*dx)*(p[i*ny+j+1]-p[i*ny+j-1])+
nu*(dt/(dx*dx)*
(un[i*ny+j+1]-2*un[i*ny+j]+un[i*ny+j-1])+
dt/(dy*dy)*
(un[(i+1)*ny+j]-2*un[i*ny+j]+un[(i-1)*ny+j])
)
);
v[i*ny+j]=(vn[i*ny+j]-
un[i*ny+j]*dt/dx*
(vn[i*ny+j]-vn[i*ny+j-1])-
vn[i*ny+j]*dt/dy*
(vn[i*ny+j]-vn[(i-1)*ny+j])-
dt/(2*rho*dx)*(p[(i+1)*ny+j]-p[(i-1)*ny+j])+
nu*(dt/(dx*dx)*
(vn[i*ny+j+1]-2*vn[i*ny+j]+vn[i*ny+j-1])+
dt/(dy*dy)*
(vn[(i+1)*ny+j]-2*vn[i*ny+j]+vn[(i-1)*ny+j])
)
);
}
__syncthreads();
}
__global__ void cavity_flow_2(float *u,float *v,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i==0){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(j==0){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(j==ny-1){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(i==nx-1){
u[i*ny+j]=1.0;
v[i*ny+j]=0.0;
}
__syncthreads();
}
int main() {
int nx = 41;
int ny = 41;
int nt = 500;
int nit = 50;
float dx = 2.0/(nx-1);
float dy = 2.0/(ny-1);
float rho = 1.0;
float nu = 0.1;
float dt = 0.001;
int size = nx * ny * sizeof(float);
float *u,*v,*p,*b;
hipMallocManaged(&u, size);
hipMallocManaged(&v, size);
hipMallocManaged(&p, size);
hipMallocManaged(&b, size);
float *pn;
hipMallocManaged(&pn, size);
float *un,*vn;
hipMallocManaged(&un, size);
hipMallocManaged(&vn, size);
//-------------------nt=100----------------------------------
nt=100;
hipLaunchKernelGGL(( initial), dim3(nx*ny/M),dim3(M), 0, 0, u,v,p,b,nx,ny);
hipDeviceSynchronize();
for (int nt_index=0;nt_index<nt;nt_index++){
un=u;
vn=v;
hipLaunchKernelGGL(( build_up_b), dim3(nx*ny/M),dim3(M), 0, 0, b,rho,dt,u,v,dx,dy,nx,ny);
hipDeviceSynchronize();
for (int nit_index=0;nit_index<nit;nit_index++){
pn=p;
hipLaunchKernelGGL(( pressure_poisson), dim3(nx*ny/M),dim3(M), 0, 0, p,dx,dy,b,nx,ny,pn);
hipLaunchKernelGGL(( pressure_poisson_2), dim3(nx*ny/M),dim3(M), 0, 0, p,nx,ny);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( cavity_flow), dim3(nx*ny/M),dim3(M), 0, 0, nt,u,v,dt,dx,dy,p,rho,nu,nx,ny,un,vn);
hipLaunchKernelGGL(( cavity_flow_2), dim3(nx*ny/M),dim3(M), 0, 0, u,v,nx,ny);
hipDeviceSynchronize();
}
ofstream outFile_u_100,outFile_v_100,outFile_p_100;
outFile_u_100.open("./Data/u_data_100.csv", ios::out);
outFile_v_100.open("./Data/v_data_100.csv", ios::out);
outFile_p_100.open("./Data/p_data_100.csv", ios::out);
for (int i=0;i<nx;i++){
for (int j=0;j<ny;j++){
outFile_u_100<<u[ny*i+j]<<',';
outFile_v_100<<v[ny*i+j]<<',';
outFile_p_100<<p[ny*i+j]<<',';
}
outFile_u_100<<endl;
outFile_v_100<<endl;
outFile_p_100<<endl;
}
//-------------------nt=700----------------------------------
nt=700;
hipLaunchKernelGGL(( initial), dim3(nx*ny/M),dim3(M), 0, 0, u,v,p,b,nx,ny);
hipDeviceSynchronize();
for (int nt_index=0;nt_index<nt;nt_index++){
un=u;
vn=v;
hipLaunchKernelGGL(( build_up_b), dim3(nx*ny/M),dim3(M), 0, 0, b,rho,dt,u,v,dx,dy,nx,ny);
hipDeviceSynchronize();
for (int nit_index=0;nit_index<nit;nit_index++){
pn=p;
hipLaunchKernelGGL(( pressure_poisson), dim3(nx*ny/M),dim3(M), 0, 0, p,dx,dy,b,nx,ny,pn);
hipLaunchKernelGGL(( pressure_poisson_2), dim3(nx*ny/M),dim3(M), 0, 0, p,nx,ny);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( cavity_flow), dim3(nx*ny/M),dim3(M), 0, 0, nt,u,v,dt,dx,dy,p,rho,nu,nx,ny,un,vn);
hipLaunchKernelGGL(( cavity_flow_2), dim3(nx*ny/M),dim3(M), 0, 0, u,v,nx,ny);
hipDeviceSynchronize();
}
ofstream outFile_u_700,outFile_v_700,outFile_p_700;
outFile_u_700.open("./Data/u_data_700.csv", ios::out);
outFile_v_700.open("./Data/v_data_700.csv", ios::out);
outFile_p_700.open("./Data/p_data_700.csv", ios::out);
for (int i=0;i<nx;i++){
for (int j=0;j<ny;j++){
outFile_u_700<<u[ny*i+j]<<',';
outFile_v_700<<v[ny*i+j]<<',';
outFile_p_700<<p[ny*i+j]<<',';
}
outFile_u_700<<endl;
outFile_v_700<<endl;
outFile_p_700<<endl;
}
hipFree(u);
hipFree(v);
hipFree(p);
hipFree(b);
hipFree(pn);
hipFree(un);
hipFree(vn);
}
| 32c7509047d6101ab5749da880fc923129dfcdea.cu | #include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#define M 41
using namespace std;
typedef vector<vector<float>> matrix;
__global__ void initial(float *u,float *v,float *p,float *b,int nx,int ny){
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
u[ny*i+j]=0.0;
v[ny*i+j]=0.0;
p[ny*i+j]=0.0;
b[ny*i+j]=0.0;
}
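// Builds the source term b of the pressure Poisson equation on the interior
// points from the current velocity field (central differences in space, 1/dt divergence term).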
__global__ void build_up_b(float *b,float rho,float dt,float *u,float *v,float dx,float dy,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
b[ny*i+j]=(rho*(1.0/dt*
((u[i*ny+j+1]-u[i*ny+j-1])/
(2*dx)+(v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))-
((u[i*ny+j+1]-u[i*ny+j-1])/(2*dx))*((u[i*ny+j+1]-u[i*ny+j-1])/(2*dx))-
2*((u[(i+1)*ny+j]-u[(i-1)*ny+j])/(2*dy)*
(v[i*ny+j+1]-v[i*ny+j-1])/(2*dx))-
((v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))*((v[(i+1)*ny+j]-v[(i-1)*ny+j])/(2*dy))
)
);
}
__syncthreads();
}
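// One Jacobi sweep of the pressure Poisson equation on the interior points,
// reading the previous iterate pn and writing the new pressure into p.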
__global__ void pressure_poisson(float *p,float dx,float dy,float *b,int nx,int ny,float *pn) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
p[ny*i+j]=(((pn[i*ny+j+1] + pn[i*ny+j-1])*dy*dy+
(pn[(i+1)*ny+j] + pn[(i-1)*ny+j])*dx*dx)/
(2*(dx*dx+dy*dy))-
dx*dx*dy*dy/(2*(dx*dx+dy*dy))*b[i*ny+j]
);
}
__syncthreads();
}
__global__ void pressure_poisson_2(float *p,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(j==ny-1){
p[ny*i+j]=p[ny*i+j-1];
}
if(i==0){
p[ny*i+j]=p[(i+1)*ny+j];
}
if(j==0){
p[ny*i+j]=p[i*ny+j+1];
}
if(i==nx-1){
p[ny*i+j] = 0.0;
}
__syncthreads();
}
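// Advances the interior velocities u and v by one time step: backward differences
// for the advection terms, central differences for the pressure gradient and diffusion.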
__global__ void cavity_flow(int nt,float *u,float *v,float dt,float dx,float dy,float*p,float rho,float nu,int nx,int ny,float *un,float *vn) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i>0&&i<nx-1&&j>0&&j<ny-1){
u[i*ny+j]=(un[i*ny+j]-
un[i*ny+j]*dt/dx*
(un[i*ny+j]-un[i*ny+j-1])-
vn[i*ny+j]*dt/dy*
(un[i*ny+j]-un[(i-1)*ny+j])-
dt/(2*rho*dx)*(p[i*ny+j+1]-p[i*ny+j-1])+
nu*(dt/(dx*dx)*
(un[i*ny+j+1]-2*un[i*ny+j]+un[i*ny+j-1])+
dt/(dy*dy)*
(un[(i+1)*ny+j]-2*un[i*ny+j]+un[(i-1)*ny+j])
)
);
v[i*ny+j]=(vn[i*ny+j]-
un[i*ny+j]*dt/dx*
(vn[i*ny+j]-vn[i*ny+j-1])-
vn[i*ny+j]*dt/dy*
(vn[i*ny+j]-vn[(i-1)*ny+j])-
dt/(2*rho*dx)*(p[(i+1)*ny+j]-p[(i-1)*ny+j])+
nu*(dt/(dx*dx)*
(vn[i*ny+j+1]-2*vn[i*ny+j]+vn[i*ny+j-1])+
dt/(dy*dy)*
(vn[(i+1)*ny+j]-2*vn[i*ny+j]+vn[(i-1)*ny+j])
)
);
}
__syncthreads();
}
__global__ void cavity_flow_2(float *u,float *v,int nx,int ny) {
int i = blockIdx.x / (ny / M);
int j = threadIdx.x + blockDim.x * (blockIdx.x % (nx / M));
if(i==0){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(j==0){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(j==ny-1){
u[i*ny+j]=0.0;
v[i*ny+j]=0.0;
}
if(i==nx-1){
u[i*ny+j]=1.0;
v[i*ny+j]=0.0;
}
__syncthreads();
}
int main() {
int nx = 41;
int ny = 41;
int nt = 500;
int nit = 50;
float dx = 2.0/(nx-1);
float dy = 2.0/(ny-1);
float rho = 1.0;
float nu = 0.1;
float dt = 0.001;
int size = nx * ny * sizeof(float);
float *u,*v,*p,*b;
cudaMallocManaged(&u, size);
cudaMallocManaged(&v, size);
cudaMallocManaged(&p, size);
cudaMallocManaged(&b, size);
float *pn;
cudaMallocManaged(&pn, size);
float *un,*vn;
cudaMallocManaged(&un, size);
cudaMallocManaged(&vn, size);
//-------------------nt=100----------------------------------
nt=100;
initial<<<nx*ny/M,M>>>(u,v,p,b,nx,ny);
cudaDeviceSynchronize();
for (int nt_index=0;nt_index<nt;nt_index++){
un=u;
vn=v;
build_up_b<<<nx*ny/M,M>>>(b,rho,dt,u,v,dx,dy,nx,ny);
cudaDeviceSynchronize();
for (int nit_index=0;nit_index<nit;nit_index++){
pn=p;
pressure_poisson<<<nx*ny/M,M>>>(p,dx,dy,b,nx,ny,pn);
pressure_poisson_2<<<nx*ny/M,M>>>(p,nx,ny);
}
cudaDeviceSynchronize();
cavity_flow<<<nx*ny/M,M>>>(nt,u,v,dt,dx,dy,p,rho,nu,nx,ny,un,vn);
cavity_flow_2<<<nx*ny/M,M>>>(u,v,nx,ny);
cudaDeviceSynchronize();
}
ofstream outFile_u_100,outFile_v_100,outFile_p_100;
outFile_u_100.open("./Data/u_data_100.csv", ios::out);
outFile_v_100.open("./Data/v_data_100.csv", ios::out);
outFile_p_100.open("./Data/p_data_100.csv", ios::out);
for (int i=0;i<nx;i++){
for (int j=0;j<ny;j++){
outFile_u_100<<u[ny*i+j]<<',';
outFile_v_100<<v[ny*i+j]<<',';
outFile_p_100<<p[ny*i+j]<<',';
}
outFile_u_100<<endl;
outFile_v_100<<endl;
outFile_p_100<<endl;
}
//-------------------nt=700----------------------------------
nt=700;
initial<<<nx*ny/M,M>>>(u,v,p,b,nx,ny);
cudaDeviceSynchronize();
for (int nt_index=0;nt_index<nt;nt_index++){
un=u;
vn=v;
build_up_b<<<nx*ny/M,M>>>(b,rho,dt,u,v,dx,dy,nx,ny);
cudaDeviceSynchronize();
for (int nit_index=0;nit_index<nit;nit_index++){
pn=p;
pressure_poisson<<<nx*ny/M,M>>>(p,dx,dy,b,nx,ny,pn);
pressure_poisson_2<<<nx*ny/M,M>>>(p,nx,ny);
}
cudaDeviceSynchronize();
cavity_flow<<<nx*ny/M,M>>>(nt,u,v,dt,dx,dy,p,rho,nu,nx,ny,un,vn);
cavity_flow_2<<<nx*ny/M,M>>>(u,v,nx,ny);
cudaDeviceSynchronize();
}
ofstream outFile_u_700,outFile_v_700,outFile_p_700;
outFile_u_700.open("./Data/u_data_700.csv", ios::out);
outFile_v_700.open("./Data/v_data_700.csv", ios::out);
outFile_p_700.open("./Data/p_data_700.csv", ios::out);
for (int i=0;i<nx;i++){
for (int j=0;j<ny;j++){
outFile_u_700<<u[ny*i+j]<<',';
outFile_v_700<<v[ny*i+j]<<',';
outFile_p_700<<p[ny*i+j]<<',';
}
outFile_u_700<<endl;
outFile_v_700<<endl;
outFile_p_700<<endl;
}
cudaFree(u);
cudaFree(v);
cudaFree(p);
cudaFree(b);
cudaFree(pn);
cudaFree(un);
cudaFree(vn);
}
|
81f1c702fdea5fd3eecf4747722badc5f1f241d3.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <lu.hpp>
#include <err_common.hpp>
#if defined(WITH_CUDA_LINEAR_ALGEBRA)
#include <cusolverDnManager.hpp>
#include <memory.hpp>
#include <copy.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <kernel/lu_split.hpp>
namespace cuda
{
using cusolver::getDnHandle;
//cusolverStatus_t CUDENSEAPI cusolverDn<>getrf_bufferSize(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda, int *Lwork );
//
//
//cusolverStatus_t CUDENSEAPI cusolverDn<>getrf(
// hipsolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// <> *Workspace,
// int *devIpiv, int *devInfo );
template<typename T>
struct getrf_func_def_t
{
typedef cusolverStatus_t (*getrf_func_def) (
hipsolverDnHandle_t, int, int,
T *, int,
T *,
int *, int *);
};
template<typename T>
struct getrf_buf_func_def_t
{
typedef cusolverStatus_t (*getrf_buf_func_def) (
hipsolverDnHandle_t, int, int,
T *, int, int *);
};
#define LU_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func(); \
\
template<typename T> \
typename FUNC##_buf_func_def_t<T>::FUNC##_buf_func_def \
FUNC##_buf_func();
#define LU_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \
FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_buf_func_def_t<TYPE>::FUNC##_buf_func_def \
FUNC##_buf_func<TYPE>() \
{ return (FUNC##_buf_func_def_t<TYPE>::FUNC##_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
LU_FUNC_DEF( getrf )
LU_FUNC(getrf , float , S)
LU_FUNC(getrf , double , D)
LU_FUNC(getrf , cfloat , C)
LU_FUNC(getrf , cdouble, Z)
void convertPivot(Array<int> &pivot, int out_sz)
{
dim_t d0 = pivot.dims()[0];
std::vector<int> d_po(out_sz);
for(int i = 0; i < out_sz; i++) {
d_po[i] = i;
}
std::vector<int> d_pi(d0);
copyData(&d_pi[0], pivot);
for(int j = 0; j < d0; j++) {
// 1 indexed in pivot
std::swap(d_po[j], d_po[d_pi[j] - 1]);
}
pivot = createHostDataArray<int>(out_sz, &d_po[0]);
}
template<typename T>
void lu(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in)
{
dim4 iDims = in.dims();
int M = iDims[0];
int N = iDims[1];
Array<T> in_copy = copyArray<T>(in);
pivot = lu_inplace(in_copy);
// SPLIT into lower and upper
dim4 ldims(M, min(M, N));
dim4 udims(min(M, N), N);
lower = createEmptyArray<T>(ldims);
upper = createEmptyArray<T>(udims);
kernel::lu_split<T>(lower, upper, in_copy);
}
template<typename T>
Array<int> lu_inplace(Array<T> &in, const bool convert_pivot)
{
dim4 iDims = in.dims();
int M = iDims[0];
int N = iDims[1];
Array<int> pivot = createEmptyArray<int>(af::dim4(min(M, N), 1, 1, 1));
int lwork = 0;
CUSOLVER_CHECK(getrf_buf_func<T>()(getDnHandle(),
M, N,
in.get(), in.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrf_func<T>()(getDnHandle(),
M, N,
in.get(), in.strides()[1],
workspace,
pivot.get(),
info));
if(convert_pivot) convertPivot(pivot, M);
memFree(workspace);
memFree(info);
return pivot;
}
#define INSTANTIATE_LU(T) \
template Array<int> lu_inplace<T>(Array<T> &in, const bool convert_pivot); \
template void lu<T>(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in);
INSTANTIATE_LU(float)
INSTANTIATE_LU(cfloat)
INSTANTIATE_LU(double)
INSTANTIATE_LU(cdouble)
}
#else
namespace cuda
{
template<typename T>
void lu(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in)
{
AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled",
AF_ERR_NOT_CONFIGURED);
}
template<typename T>
Array<int> lu_inplace(Array<T> &in, const bool convert_pivot)
{
AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled",
AF_ERR_NOT_CONFIGURED);
}
#define INSTANTIATE_LU(T) \
template Array<int> lu_inplace<T>(Array<T> &in, const bool convert_pivot); \
template void lu<T>(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in);
INSTANTIATE_LU(float)
INSTANTIATE_LU(cfloat)
INSTANTIATE_LU(double)
INSTANTIATE_LU(cdouble)
}
#endif
| 81f1c702fdea5fd3eecf4747722badc5f1f241d3.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <lu.hpp>
#include <err_common.hpp>
#if defined(WITH_CUDA_LINEAR_ALGEBRA)
#include <cusolverDnManager.hpp>
#include <memory.hpp>
#include <copy.hpp>
#include <math.hpp>
#include <err_common.hpp>
#include <kernel/lu_split.hpp>
namespace cuda
{
using cusolver::getDnHandle;
//cusolverStatus_t CUDENSEAPI cusolverDn<>getrf_bufferSize(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda, int *Lwork );
//
//
//cusolverStatus_t CUDENSEAPI cusolverDn<>getrf(
// cusolverDnHandle_t handle,
// int m, int n,
// <> *A,
// int lda,
// <> *Workspace,
// int *devIpiv, int *devInfo );
template<typename T>
struct getrf_func_def_t
{
typedef cusolverStatus_t (*getrf_func_def) (
cusolverDnHandle_t, int, int,
T *, int,
T *,
int *, int *);
};
template<typename T>
struct getrf_buf_func_def_t
{
typedef cusolverStatus_t (*getrf_buf_func_def) (
cusolverDnHandle_t, int, int,
T *, int, int *);
};
#define LU_FUNC_DEF( FUNC ) \
template<typename T> \
typename FUNC##_func_def_t<T>::FUNC##_func_def \
FUNC##_func(); \
\
template<typename T> \
typename FUNC##_buf_func_def_t<T>::FUNC##_buf_func_def \
FUNC##_buf_func();
#define LU_FUNC( FUNC, TYPE, PREFIX ) \
template<> typename FUNC##_func_def_t<TYPE>::FUNC##_func_def \
FUNC##_func<TYPE>() \
{ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def)&cusolverDn##PREFIX##FUNC; } \
\
template<> typename FUNC##_buf_func_def_t<TYPE>::FUNC##_buf_func_def \
FUNC##_buf_func<TYPE>() \
{ return (FUNC##_buf_func_def_t<TYPE>::FUNC##_buf_func_def)& cusolverDn##PREFIX##FUNC##_bufferSize; }
LU_FUNC_DEF( getrf )
LU_FUNC(getrf , float , S)
LU_FUNC(getrf , double , D)
LU_FUNC(getrf , cfloat , C)
LU_FUNC(getrf , cdouble, Z)
void convertPivot(Array<int> &pivot, int out_sz)
{
dim_t d0 = pivot.dims()[0];
std::vector<int> d_po(out_sz);
for(int i = 0; i < out_sz; i++) {
d_po[i] = i;
}
std::vector<int> d_pi(d0);
copyData(&d_pi[0], pivot);
for(int j = 0; j < d0; j++) {
// 1 indexed in pivot
std::swap(d_po[j], d_po[d_pi[j] - 1]);
}
pivot = createHostDataArray<int>(out_sz, &d_po[0]);
}
template<typename T>
void lu(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in)
{
dim4 iDims = in.dims();
int M = iDims[0];
int N = iDims[1];
Array<T> in_copy = copyArray<T>(in);
pivot = lu_inplace(in_copy);
// SPLIT into lower and upper
dim4 ldims(M, min(M, N));
dim4 udims(min(M, N), N);
lower = createEmptyArray<T>(ldims);
upper = createEmptyArray<T>(udims);
kernel::lu_split<T>(lower, upper, in_copy);
}
template<typename T>
Array<int> lu_inplace(Array<T> &in, const bool convert_pivot)
{
dim4 iDims = in.dims();
int M = iDims[0];
int N = iDims[1];
Array<int> pivot = createEmptyArray<int>(af::dim4(min(M, N), 1, 1, 1));
int lwork = 0;
CUSOLVER_CHECK(getrf_buf_func<T>()(getDnHandle(),
M, N,
in.get(), in.strides()[1],
&lwork));
T *workspace = memAlloc<T>(lwork);
int *info = memAlloc<int>(1);
CUSOLVER_CHECK(getrf_func<T>()(getDnHandle(),
M, N,
in.get(), in.strides()[1],
workspace,
pivot.get(),
info));
if(convert_pivot) convertPivot(pivot, M);
memFree(workspace);
memFree(info);
return pivot;
}
#define INSTANTIATE_LU(T) \
template Array<int> lu_inplace<T>(Array<T> &in, const bool convert_pivot); \
template void lu<T>(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in);
INSTANTIATE_LU(float)
INSTANTIATE_LU(cfloat)
INSTANTIATE_LU(double)
INSTANTIATE_LU(cdouble)
}
#else
namespace cuda
{
template<typename T>
void lu(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in)
{
AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled",
AF_ERR_NOT_CONFIGURED);
}
template<typename T>
Array<int> lu_inplace(Array<T> &in, const bool convert_pivot)
{
AF_ERROR("CUDA cusolver not available. Linear Algebra is disabled",
AF_ERR_NOT_CONFIGURED);
}
#define INSTANTIATE_LU(T) \
template Array<int> lu_inplace<T>(Array<T> &in, const bool convert_pivot); \
template void lu<T>(Array<T> &lower, Array<T> &upper, Array<int> &pivot, const Array<T> &in);
INSTANTIATE_LU(float)
INSTANTIATE_LU(cfloat)
INSTANTIATE_LU(double)
INSTANTIATE_LU(cdouble)
}
#endif
|
58732382ed2303f3e4e2b9fe5e2296dd9919e23a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ctime>
#include "layer_hip.cuh"
#include "config.h"
int main(int argc, char*argv[]){
ElementInputA* in_data;
ElementOutput* out;
int batch_size = 8;
int in_channels = 32;
int input_height = 224;
int input_width = 224;
int out_channels = 16;
int filter_height = 3;
int filter_width = 3;
int stride = 1;
// for CUTLASS test
// int in_bytes = batch_size * in_channels * input_height * input_width * sizeof(ElementInputA);
// hipMalloc(&in_data, in_bytes);
// auto conv = new CONV(batch_size, input_height, input_width, in_channels, out_channels, filter_height, filter_width, stride, 1);
// out = conv->forward(in_data);
// auto fc = new FC(batch_size, in_channels*input_height*input_width, out_channels);
// out = fc->forward(in_data);
// for cuDNN test.
// set the pooling layer for evaluation.
int in_bytes = batch_size * in_channels * input_height * input_width * sizeof(cuDNNtype);
hipMalloc(&in_data, in_bytes);
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// auto pool = new POOL(batch_size, in_channels, input_height, input_width, &cudnn);
// auto relu = new RELU(batch_size, in_channels, pool->get_output_height(), pool->get_output_width(), &cudnn);
// auto relu = new RELU(batch_size, in_channels, input_height, input_width, &cudnn);
auto bn = new BN(batch_size, in_channels, input_height, input_width, &cudnn);
// out = pool->forward(in_data);
// out = relu->forward(in_data);
out = bn->forward(in_data);
return 0;
} | 58732382ed2303f3e4e2b9fe5e2296dd9919e23a.cu |
#include <stdio.h>
#include <ctime>
#include "layer.cuh"
#include "config.h"
int main(int argc, char*argv[]){
ElementInputA* in_data;
ElementOutput* out;
int batch_size = 8;
int in_channels = 32;
int input_height = 224;
int input_width = 224;
int out_channels = 16;
int filter_height = 3;
int filter_width = 3;
int stride = 1;
// for CUTLASS test
// int in_bytes = batch_size * in_channels * input_height * input_width * sizeof(ElementInputA);
// cudaMalloc(&in_data, in_bytes);
// auto conv = new CONV(batch_size, input_height, input_width, in_channels, out_channels, filter_height, filter_width, stride, 1);
// out = conv->forward(in_data);
// auto fc = new FC(batch_size, in_channels*input_height*input_width, out_channels);
// out = fc->forward(in_data);
// for cuDNN test.
// set the pooling layer for evaluation.
int in_bytes = batch_size * in_channels * input_height * input_width * sizeof(cuDNNtype);
cudaMalloc(&in_data, in_bytes);
cudnnHandle_t cudnn;
checkCUDNN(cudnnCreate(&cudnn));
// auto pool = new POOL(batch_size, in_channels, input_height, input_width, &cudnn);
// auto relu = new RELU(batch_size, in_channels, pool->get_output_height(), pool->get_output_width(), &cudnn);
// auto relu = new RELU(batch_size, in_channels, input_height, input_width, &cudnn);
auto bn = new BN(batch_size, in_channels, input_height, input_width, &cudnn);
// out = pool->forward(in_data);
// out = relu->forward(in_data);
out = bn->forward(in_data);
return 0;
} |
9a8c643c8219907d778f8193d9784f424ea6a80a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[iy*nx + ix];
}
} | 9a8c643c8219907d778f8193d9784f424ea6a80a.cu | #include "includes.h"
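// One thread per element: consecutive threads read consecutive (coalesced)
// input elements and write the transposed output with a stride of ny, which is
// what the kernel name describes.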
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[iy*nx + ix];
}
} |
e364f3e98615987fda6036393a88034f770bf4cd.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| e364f3e98615987fda6036393a88034f770bf4cd.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
7e687ee49d1ffc57fc64cb8d0a1a6a44a412d622.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <fstream>
using namespace std;
__global__ void matrixMulKernel(int *d_M, int *d_N, int *d_P, int width){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue;
if((row < width)&&(col < width)){
Pvalue = 0;
for (int k = 0; k < width ; ++k){
Pvalue += d_M[row*width+k] * d_N[k*width+col];
}
d_P[row*width+col] = Pvalue;
}
}
int matrixMulHost(int *h_M, int *h_N, int *h_P, int width){
int Pvalue;
for(int row = 0; row < width ; ++row){
for(int col = 0; col < width ; ++col){
Pvalue = 0;
for(int k = 0; k < width ; ++k){
Pvalue += h_M[row*width+k] * h_N[k*width+col];
}
h_P[row*width+col] = Pvalue;
}
}
return 0;
}
int initValues(int *data, int width){
for(int i = 0; i < width*width; i++)
data[i] = 2;
return 0;
}
int printData(int *data, int width){
for(int i = 0; i < width; ++i){
for(int j = 0; j < width; ++j){
printf("%d ", data[(i*width)+j]);
}
printf("\n");
}
return 0;
}
int main(int argc, char const *argv[])
{
//device
int *d_MA, *d_MB,*d_MR;
//host matrix A, matrix B, matrix result, matrix result Device,
int *h_MA, *h_MB, *h_MR,*h_MRD;
//width of matrix
int width = 2048;
hipError_t error = hipSuccess;
int size = width * width * sizeof(int);
//clock
clock_t start, end, startGPU, endGPU;
double cpu_time, gpu_time;
// Allocate memory for each matrix on host
h_MA = (int*)malloc(size);
h_MB = (int*)malloc(size);
h_MR = (int*)malloc(size);
h_MRD = (int*)malloc(size);
initValues(h_MA, width);
initValues(h_MB, width);
/////////Algoritmo Secuencial////////////////////////////////////////////
start = clock();
matrixMulHost(h_MA, h_MB, h_MR, width);
end = clock();
cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo algoritmo secuencial: %.10f\n", cpu_time);
//reservando memoria para el Device//////////////////////////////////////////
error = hipMalloc((void**)&d_MA,size);
if(error != hipSuccess){
printf("Error reservando memoria para MAtrix A en device");
exit(0);
}
error = hipMalloc((void**)&d_MB,size);
if(error != hipSuccess){
printf("Error reservando memoria para MAtrix B en device");
exit(0);
}
error = hipMalloc((void**)&d_MR,size);
if(error != hipSuccess){
printf("Error reservando memoria para MAtrix resultado en device");
exit(0);
}
//////////////////////Algoritmo Paralelo///////////////////////////
///////////////// copiando matrices del Host al device////////////
startGPU = clock();
error = hipMemcpy(d_MA, h_MA, size, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando matriz A del host al Device");
exit(0);
}
error = hipMemcpy(d_MB, h_MB, size, hipMemcpyHostToDevice);
if(error != hipSuccess){
printf("Error copiando matriz B del host al Device");
exit(0);
}
/////////Lanzamiento de kernel///////////////////////////////////
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(width/float(blockSize)),1);
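// with width = 2048 and blockSize = 32 this is a 64 x 64 grid of 32 x 32
// thread blocks, i.e. one thread per element of the result matrix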
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_MA,d_MB,d_MR,width);
hipDeviceSynchronize();
hipMemcpy(h_MRD, d_MR, size, hipMemcpyDeviceToHost);
endGPU = clock();
gpu_time = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo algoritmo paralelo: %.10f\n", gpu_time);
printf("La aceleracin obtenida es de %.10fX\n",cpu_time/gpu_time);
///////////////////////Algoritmo Paralelo////////////////////////////
free(h_MA);
free(h_MB);
free(h_MR);
free(h_MRD);
hipFree(d_MA);
hipFree(d_MB);
hipFree(d_MR);
return 0;
}
| 7e687ee49d1ffc57fc64cb8d0a1a6a44a412d622.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <fstream>
using namespace std;
__global__ void matrixMulKernel(int *d_M, int *d_N, int *d_P, int width){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
int Pvalue;
if((row < width)&&(col < width)){
Pvalue = 0;
for (int k = 0; k < width ; ++k){
Pvalue += d_M[row*width+k] * d_N[k*width+col];
}
d_P[row*width+col] = Pvalue;
}
}
int matrixMulHost(int *h_M, int *h_N, int *h_P, int width){
int Pvalue;
for(int row = 0; row < width ; ++row){
for(int col = 0; col < width ; ++col){
Pvalue = 0;
for(int k = 0; k < width ; ++k){
Pvalue += h_M[row*width+k] * h_N[k*width+col];
}
h_P[row*width+col] = Pvalue;
}
}
return 0;
}
int initValues(int *data, int width){
for(int i = 0; i < width*width; i++)
data[i] = 2;
return 0;
}
int printData(int *data, int width){
for(int i = 0; i < width; ++i){
for(int j = 0; j < width; ++j){
printf("%d ", data[(i*width)+j]);
}
printf("\n");
}
return 0;
}
int main(int argc, char const *argv[])
{
//device
int *d_MA, *d_MB,*d_MR;
//host matrix A, matrix B, matrix result, matrix result Device,
int *h_MA, *h_MB, *h_MR,*h_MRD;
//width of matrix
int width = 2048;
cudaError_t error = cudaSuccess;
int size = width * width * sizeof(int);
//clock
clock_t start, end, startGPU, endGPU;
double cpu_time, gpu_time;
// Allocate memory for each matrix on host
h_MA = (int*)malloc(size);
h_MB = (int*)malloc(size);
h_MR = (int*)malloc(size);
h_MRD = (int*)malloc(size);
initValues(h_MA, width);
initValues(h_MB, width);
/////////Algoritmo Secuencial////////////////////////////////////////////
start = clock();
matrixMulHost(h_MA, h_MB, h_MR, width);
end = clock();
cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("Tiempo algoritmo secuencial: %.10f\n", cpu_time);
//reservando memoria para el Device//////////////////////////////////////////
error = cudaMalloc((void**)&d_MA,size);
if(error != cudaSuccess){
printf("Error reservando memoria para MAtrix A en device");
exit(0);
}
error = cudaMalloc((void**)&d_MB,size);
if(error != cudaSuccess){
printf("Error reservando memoria para MAtrix B en device");
exit(0);
}
error = cudaMalloc((void**)&d_MR,size);
if(error != cudaSuccess){
printf("Error reservando memoria para MAtrix resultado en device");
exit(0);
}
//////////////////////Algoritmo Paralelo///////////////////////////
///////////////// copiando matrices del Host al device////////////
startGPU = clock();
error = cudaMemcpy(d_MA, h_MA, size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando matriz A del host al Device");
exit(0);
}
error = cudaMemcpy(d_MB, h_MB, size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando matriz B del host al Device");
exit(0);
}
/////////Lanzamiento de kernel///////////////////////////////////
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(width/float(blockSize)),1);
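// with width = 2048 and blockSize = 32 this is a 64 x 64 grid of 32 x 32
// thread blocks, i.e. one thread per element of the result matrix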
matrixMulKernel<<<dimGrid,dimBlock>>>(d_MA,d_MB,d_MR,width);
cudaDeviceSynchronize();
cudaMemcpy(h_MRD, d_MR, size, cudaMemcpyDeviceToHost);
endGPU = clock();
gpu_time = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo algoritmo paralelo: %.10f\n", gpu_time);
printf("La aceleración obtenida es de %.10fX\n",cpu_time/gpu_time);
///////////////////////Algoritmo Paralelo////////////////////////////
free(h_MA);
free(h_MB);
free(h_MR);
free(h_MRD);
cudaFree(d_MA);
cudaFree(d_MB);
cudaFree(d_MR);
return 0;
}
|
9844055928a790c2652f7ef6793da46f9a9330f8.hip | // !!! This is a file automatically generated by hipify!!!
/* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.hip"
#include "assist.h"
#define ERROR_CHECK { hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
bool if_quiet = true;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
hipEvent_t start, stop;
float timer_compute, timer_memory_in, timer_memory_out;
// Create CUDA events for measuring execution times
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab1.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab1.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = 2; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
if_quiet = false; // If not display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
if_quiet = true; // If not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
// Start measuring transfer times from CPU to GPU
hipEventRecord(start, NULL);
printf(" Allocate device memory.\n");
hipMalloc((void**) &deviceM, mem_size_M );
hipMalloc((void**) &deviceN, mem_size_N );
printf(" Copy host memory data to device.\n");
hipMemcpy( deviceM, hostM, mem_size_M, hipMemcpyHostToDevice);
hipMemcpy( deviceN, hostN, mem_size_N, hipMemcpyHostToDevice);
printf(" Allocate device memory for results and clean it.\n");
hipMalloc((void**) &deviceP, mem_size_P);
hipMemset(deviceP, 0, mem_size_P);
// Stop measuring transfer times from CPU to GPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_memory_in, start, stop);
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Different ways of declarations
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
printf(" Executing the kernel...\n");
// Start measuring the computation time for the CUDA kernel
hipEventRecord(start, NULL);
// Invoke the CUDA kernel here
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(block), 0, 0, deviceP, deviceM, deviceN, Mh, Mw, Nw, block_size);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
hipDeviceSynchronize();
// Stop measuring the computation time for the CUDA kernel
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_compute, start, stop);
// ===================================================================
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
// Start measuring the transfer time back from the GPU to the CPU
hipEventRecord(start, NULL);
hipMemcpy( hostP, deviceP, mem_size_P, hipMemcpyDeviceToHost);
// Stop measuring the transfer time back from the GPU to the CPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_memory_out, start, stop);
// ================================================
// Show timing information
// ================================================
printf("Transfer time from CPU to GPU: %.4f ms.\n", timer_memory_in);
printf("GPU computation time: %.4f ms.\n", timer_compute);
printf("Transfer time from GPU to CPU: %.4f ms.\n\n", timer_memory_out);
printf("Total GPU processing time: %.4f ms.\n", timer_memory_in+timer_compute+timer_memory_out);
// ===========================
// Compare CPU and GPU results
// ===========================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
// Start measuring the computation time for the CPU
hipEventRecord(start, NULL);
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
// Stop measuring the computation time for the CPU
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&timer_compute, start, stop);
printf(" CPU Processing time : %.4f ms.\n\n", timer_compute);
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab1.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab1.1-matrixmul.bin", "lab1.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
hipFree( deviceM );
hipFree( deviceN );
hipFree( deviceP );
}
| 9844055928a790c2652f7ef6793da46f9a9330f8.cu | /* Matrix multiplication: P = M * N.
* Host code.
*/
// includes, system
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.cu"
#include "assist.h"
#define ERROR_CHECK { cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);}}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char** argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void runTest(int argc, char** argv)
{
bool if_quiet = true;
int i, j;
char *matrix_id = NULL, *input_fn = NULL, *gold_fn = NULL;
float * deviceM = NULL, * deviceN = NULL, * deviceP = NULL;
int Mw = 0, Mh = 0, Nw = 0, Nh = 0, Pw = 0, Ph = 0;
int block_size = 0;
cudaEvent_t start, stop;
float timer_compute, timer_memory_in, timer_memory_out;
// Create CUDA events for measuring execution times
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc == 2) {
matrix_id = strdup(argv[1]);
} else {
fprintf(stderr, "Error: Wrong input parameter numbers.\n");
fprintf(stderr, "Usage:\n"
"$> ./lab1.1-matrixmul <8, 128, 512, 3072, 4096>\n"
"Examples:\n"
" $> ./lab1.1-matrixmul 128\n"
);
exit(1);
}
// Note: Matrix width and height must be multiples of block size.
if (!strcmp(matrix_id, "8")) {
Mw = Mh = Nw = Nh = Pw = Ph = 8;
block_size = 2; // thread number = block_size^2
input_fn = strdup("matrix_8.bin");
gold_fn = strdup("matrix_8.gold");
if_quiet = false; // If not display matrix contents
} else
if (!strcmp(matrix_id, "128")) {
Mw = Mh = Nw = Nh = Pw = Ph = 128;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_128.bin");
gold_fn = strdup("matrix_128.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "512")) {
Mw = Mh = Nw = Nh = Pw = Ph = 512;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_512.bin");
gold_fn = strdup("matrix_512.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "3072")) {
Mw = Mh = Nw = Nh = Pw = Ph = 3072;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_3072.bin");
gold_fn = strdup("matrix_3072.gold");
if_quiet = true; // If not display matrix contents
} else
if (!strcmp(matrix_id, "4096")) {
Mw = Mh = Nw = Nh = Pw = Ph = 4096;
block_size = 16; // thread number = block_size^2
input_fn = strdup("matrix_4096.bin");
gold_fn = strdup("matrix_4096.gold");
if_quiet = true; // If not display matrix contents
} else {
printf("***Error on %s: %d: Undefined matrix ID.\n",
__FILE__, __LINE__);
printf(" You should add it to the source code.\n");
printf(" Current available ID's are 8, 128, 512, 3072, 4096.\n");
exit(1);
}
printf("Input matrix file name: %s\n", input_fn);
printf("Setup host side environment and launch kernel:\n");
// allocate host memory for matrices M and N
printf(" Allocate host memory for matrices M and N.\n");
printf(" M: %d x %d\n", Mw, Mh);
printf(" N: %d x %d\n", Nw, Nh);
unsigned int size_M = Mw * Mh;
unsigned int mem_size_M = sizeof(float) * size_M;
float* hostM = (float*) malloc(mem_size_M);
unsigned int size_N = Nw * (Nh);
unsigned int mem_size_N = sizeof(float) * size_N;
float* hostN = (float*) malloc(mem_size_N);
// allocate memory for the result on host side
printf(" Allocate memory for the result on host side.\n");
unsigned int size_P = Pw * Ph;
unsigned int mem_size_P = sizeof(float) * size_P;
float* hostP = (float*) malloc(mem_size_P);
// Initialize the input matrices.
printf(" Initialize the input matrices.\n");
unsigned int * matrix = ReadMatrixFile(input_fn, Pw, Ph, if_quiet);
for (i = 0; i < Mw; i++)
for (j = 0; j < Nw; j++)
hostM[i * Mw + j] = hostN[i * Mw + j] = (float) matrix[i*Mw + j];
free(matrix); matrix = NULL;
// ===================================================================
// Allocate device memory for the input matrices.
// Copy memory from the host memory to the device memory.
// ===================================================================
// Start measuring transfer times from CPU to GPU
cudaEventRecord(start, NULL);
printf(" Allocate device memory.\n");
cudaMalloc((void**) &deviceM, mem_size_M );
cudaMalloc((void**) &deviceN, mem_size_N );
printf(" Copy host memory data to device.\n");
cudaMemcpy( deviceM, hostM, mem_size_M, cudaMemcpyHostToDevice);
cudaMemcpy( deviceN, hostN, mem_size_N, cudaMemcpyHostToDevice);
printf(" Allocate device memory for results and clean it.\n");
cudaMalloc((void**) &deviceP, mem_size_P);
cudaMemset(deviceP, 0, mem_size_P);
// Stop measuring transfer times from CPU to GPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_memory_in, start, stop);
// ===================================================================
// Initialize the thread block and kernel grid dimensions
// and invoke the CUDA kernel.
// You may assume that each matrix dimension is a multiple
// of the defined constant block_size.
// ===================================================================
printf(" Setup kernel execution parameters.\n");
// Different ways of declarations
#if 1
dim3 block;
dim3 grid;
grid.x = Pw/block_size;
grid.y = Pw/block_size;
block.x = block_size;
block.y = block_size;
#else
dim3 block(block_size, block_size);
dim3 grid(Pw/block.x, Pw/block.y);
#endif
printf(" # of threads in a block: %d x %d (%d)\n",
block.x, block.y, block.x * block.y);
printf(" # of blocks in a grid : %d x %d (%d)\n",
grid.x, grid.y, grid.x * grid.y);
printf(" Executing the kernel...\n");
// Start measuring the computation time for the CUDA kernel
cudaEventRecord(start, NULL);
// Invoke the CUDA kernel here
matrixMul<<<grid, block>>> (deviceP, deviceM, deviceN, Mh, Mw, Nw, block_size);
// Make sure all threads have finished their jobs
// before we stop the timer_compute.
cudaThreadSynchronize();
// Stop measuring the computation time for the CUDA kernel
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_compute, start, stop);
// ===================================================================
// ===================================================================
// Copy the results back from the host
// ===================================================================
printf(" Copy result from device to host.\n");
// Start measuring the transfer time back from the GPU to the CPU
cudaEventRecord(start, NULL);
cudaMemcpy( hostP, deviceP, mem_size_P, cudaMemcpyDeviceToHost);
// Stop measuring the transfer time back from the GPU to the CPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_memory_out, start, stop);
// ================================================
// Show timing information
// ================================================
printf("Transfer time from CPU to GPU: %.4f ms.\n", timer_memory_in);
printf("GPU computation time: %.4f ms.\n", timer_compute);
printf("Transfer time from GPU to CPU: %.4f ms.\n\n", timer_memory_out);
printf("Total GPU processing time: %.4f ms.\n", timer_memory_in+timer_compute+timer_memory_out);
// ===========================
// Compare CPU and GPU results
// ===========================
// Full result check when input matrix is <= 512x512
//if (0) {
if (Mw * Nw > 512*512) {
printf("\nInput matrix size is too big. Skip computing reference.\n");
} else {
printf("\nCheck results with those computed by CPU.\n");
printf (" Computing reference solution.\n");
// Start measuring the computation time for the CPU
cudaEventRecord(start, NULL);
float* reference = (float*) malloc(mem_size_P);
computeGold(reference, hostM, hostN, Mh, Mw, Nw);
// Stop measuring the computation time for the CPU
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timer_compute, start, stop);
printf(" CPU Processing time : %.4f ms.\n\n", timer_compute);
printf(" CPU checksum: %g\n", CheckSum(reference, Mw, Nw));
matrix = (unsigned int *) malloc(Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) reference[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.gold", matrix, Pw, Ph, 1);
free(matrix); matrix = NULL;
free(reference);
}
printf(" GPU checksum: %g\n", CheckSum(hostP, Mw, Nw));
/* Write matrix C to output binary file */
matrix = (unsigned int *) malloc (Pw * Ph * sizeof(unsigned int));
for (i = 0; i < Ph; i++)
for (j = 0; j < Pw; j++)
matrix[i*Pw + j] = (unsigned int) hostP[i*Pw + j];
WriteMatrixFile("lab1.1-matrixmul.bin", matrix, Pw, Ph, 1);
free (matrix); matrix = NULL;
if (Mw >= 3072 && Mh >= 3072) {
CompareMatrixFile("lab1.1-matrixmul.bin", gold_fn, Pw, Ph, if_quiet);
} else {
CompareMatrixFile("lab1.1-matrixmul.bin", "lab1.1-matrixmul.gold",
Pw, Ph, if_quiet);
}
// clean up memory
free(hostM); free(hostN); free(hostP);
free(input_fn); free(gold_fn);
// ===================================================================
// Free the device memory
// ===================================================================
cudaFree( deviceM );
cudaFree( deviceN );
cudaFree( deviceP );
}
|
1670d94060103be8e6f1d16d7bbab59c5f5a42c2.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| 1670d94060103be8e6f1d16d7bbab59c5f5a42c2.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
82be82644391bebbe32b066ade9df58f938f859e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
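/* Working the example above through the histogram formula below,
   bin = (lum - lumMin) / lumRange * numBins: with min 0, range 9 and 3 bins
   each value maps to floor(val / 3), with 9 clamped into the last bin, which
   gives the counts [4 7 3]. The cdf shown, [4 11 14], is the inclusive scan;
   the exclusive form produced by hillis_steele_excl_scan below is [0 4 11]. */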
#include <stdio.h>
#include <float.h>
#include "utils.h"
// Min or max reduction. Produces a reduced value per block.
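// A grid-stride loop first folds the whole input into one value per thread,
// then a shared-memory tree reduces the block, so the host only has to combine
// one partial result per block.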
__global__
void reduce_minmax(float *d_out, const float *d_in, size_t n, bool maxMode)
{
// allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float cache[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tmp = FLT_MAX;
if (maxMode)
tmp = -FLT_MAX; // identity for a max reduction; FLT_MIN is the smallest positive float, not the most negative
// reduction of values outside the span of the grid
while (tid < n) {
if (maxMode)
tmp = max(tmp, d_in[tid]);
else
tmp = min(tmp, d_in[tid]);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = tmp;
// synchronize threads in this block
__syncthreads();
// tree reduction of values in cache
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (cacheIndex < i) {
if (maxMode)
cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex + i]);
else
cache[cacheIndex] = min(cache[cacheIndex], cache[cacheIndex + i]);
}
__syncthreads();
}
if (cacheIndex == 0)
d_out[blockIdx.x] = cache[0];
}
// Computes a histogram of the logLuminance channel.
__global__
void histogram(unsigned int *d_bins, size_t numBins,
const float *d_in, size_t n, float lumMin, float lumRange)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// increment global thread index by the total number of threads for
// each iteration, to handle the case where there are more input
// values than threads
while (tid < n) {
// clamp so the maximum luminance cannot index one past the last bin
int bin = min((int)(((d_in[tid] - lumMin) / lumRange) * numBins), (int)numBins - 1);
atomicAdd(&d_bins[bin], 1);
tid += blockDim.x * gridDim.x;
}
}
// Hillis & Steele exclusive sum scan.
__global__
void hillis_steele_excl_scan(unsigned int *d_out, const unsigned int *d_in,
size_t n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
    d_out[tid] = d_in[tid];
__syncthreads();
for (int i = 1; i < n; i *= 2) {
    // stage the neighbour in a register before the barrier so this round's
    // writes cannot race with another thread's read of d_out[tid - i]
    unsigned int val = 0;
    if (tid < n && tid - i >= 0)
        val = d_out[tid - i];
    __syncthreads();
    if (tid < n && tid - i >= 0)
        d_out[tid] += val;
    __syncthreads();
}
// convert to exclusive scan
if (tid < n)
d_out[tid] -= d_in[tid];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/* Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you)
*/
// Expected:
// min_logLum = -3.109206
// max_logLum = 2.265088
// num_bins = 1024
// bins = [1, 0, 0, ..., 9, 10, 9, 7, 11, 9, 15, 13, 12, 18, 16, 32, 29, ..]
// cdf = [1, 1, 1, 1, ..., 6, 8, 9, 10, 11, 12, 13, 16, 20 ,21 ,22, 31, ..]
const int threads = 128;
const int blocks = min(64, (int) ((numRows * numCols) + threads - 1) / threads);
float res, lumRange;
float *d_intermediate;
float *intermediate;
// 1) find the minimum and maximum value in the input logLuminance
// channel store in min_logLum and max_logLum
// allocate memory for intermediate values on the CPU and GPU
intermediate = (float *) malloc(sizeof(float) * blocks);
checkCudaErrors(hipMalloc((void **) &d_intermediate, sizeof(float) * blocks));
// launch min reduction kernel
hipLaunchKernelGGL(( reduce_minmax), dim3(blocks), dim3(threads), sizeof(float) * threads, 0,
d_intermediate, d_logLuminance, numRows * numCols, false);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy the intermediate values back from the GPU to the CPU
checkCudaErrors(hipMemcpy(intermediate, d_intermediate,
sizeof(float) * blocks, hipMemcpyDeviceToHost));
// finish up on the CPU side
res = FLT_MAX;
for (int i = 0; i < blocks; i++)
res = min(res, intermediate[i]);
min_logLum = res;
// launch max kernel
hipLaunchKernelGGL(( reduce_minmax), dim3(blocks), dim3(threads), sizeof(float) * threads, 0,
d_intermediate, d_logLuminance, numRows * numCols, true);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// copy the intermediate values back from the GPU to the CPU
checkCudaErrors(hipMemcpy(intermediate, d_intermediate,
sizeof(float) * blocks, hipMemcpyDeviceToHost));
// finish up on the CPU side
res = -FLT_MAX;
for (int i = 0; i < blocks; i++)
res = max(res, intermediate[i]);
max_logLum = res;
printf("min_logLum = %f\n", min_logLum);
printf("max_logLum = %f\n", max_logLum);
// 2) subtract them to find the range
lumRange = max_logLum - min_logLum;
printf("lumRange = %f\n", lumRange);
// 3) generate a histogram of all the values in the logLuminance channel
// using the formula: bin = (lum[i] - lumMin) / lumRange * numBins
unsigned int *d_bins;
size_t histoSize = sizeof(unsigned int) * numBins;
// allocate memory for the bins on the device and initialize to zero
checkCudaErrors(hipMalloc((void **) &d_bins, histoSize));
checkCudaErrors(hipMemset(d_bins, 0, histoSize));
// launch histogram kernel
hipLaunchKernelGGL(( histogram), dim3(blocks), dim3(threads), 0, 0, d_bins, numBins, d_logLuminance,
numRows * numCols, min_logLum, lumRange);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// 4) Perform an exclusive scan (prefix sum) on the histogram to get
// the cumulative distribution of luminance values (this should go in the
// incoming d_cdf pointer which already has been allocated for you)
hipLaunchKernelGGL(( hillis_steele_excl_scan), dim3(1), dim3(numBins), 0, 0, d_cdf, d_bins, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// free memory
free(intermediate);
checkCudaErrors(hipFree(d_intermediate));
checkCudaErrors(hipFree(d_bins));
}
| 82be82644391bebbe32b066ade9df58f938f859e.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
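/* Working the example above through the histogram formula below,
   bin = (lum - lumMin) / lumRange * numBins: with min 0, range 9 and 3 bins
   each value maps to floor(val / 3), with 9 clamped into the last bin, which
   gives the counts [4 7 3]. The cdf shown, [4 11 14], is the inclusive scan;
   the exclusive form produced by hillis_steele_excl_scan below is [0 4 11]. */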
#include <stdio.h>
#include <float.h>
#include "utils.h"
// Min or max reduction. Produces a reduced value per block.
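// A grid-stride loop first folds the whole input into one value per thread,
// then a shared-memory tree reduces the block, so the host only has to combine
// one partial result per block.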
__global__
void reduce_minmax(float *d_out, const float *d_in, size_t n, bool maxMode)
{
// allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float cache[];
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int cacheIndex = threadIdx.x;
float tmp = FLT_MAX;
if (maxMode)
tmp = -FLT_MAX; // identity for a max reduction; FLT_MIN is the smallest positive float, not the most negative
// reduction of values outside the span of the grid
while (tid < n) {
if (maxMode)
tmp = max(tmp, d_in[tid]);
else
tmp = min(tmp, d_in[tid]);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = tmp;
// synchronize threads in this block
__syncthreads();
// tree reduction of values in cache
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (cacheIndex < i) {
if (maxMode)
cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex + i]);
else
cache[cacheIndex] = min(cache[cacheIndex], cache[cacheIndex + i]);
}
__syncthreads();
}
if (cacheIndex == 0)
d_out[blockIdx.x] = cache[0];
}
// Computes a histogram of the logLuminance channel.
__global__
void histogram(unsigned int *d_bins, size_t numBins,
const float *d_in, size_t n, float lumMin, float lumRange)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// increment global thread index by the total number of threads for
// each iteration, to handle the case where there are more input
// values than threads
while (tid < n) {
// clamp so the maximum luminance cannot index one past the last bin
int bin = min((int)(((d_in[tid] - lumMin) / lumRange) * numBins), (int)numBins - 1);
atomicAdd(&d_bins[bin], 1);
tid += blockDim.x * gridDim.x;
}
}
// Hillis & Steele exclusive sum scan.
__global__
void hillis_steele_excl_scan(unsigned int *d_out, const unsigned int *d_in,
size_t n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < n)
    d_out[tid] = d_in[tid];
__syncthreads();
for (int i = 1; i < n; i *= 2) {
    // stage the neighbour in a register before the barrier so this round's
    // writes cannot race with another thread's read of d_out[tid - i]
    unsigned int val = 0;
    if (tid < n && tid - i >= 0)
        val = d_out[tid - i];
    __syncthreads();
    if (tid < n && tid - i >= 0)
        d_out[tid] += val;
    __syncthreads();
}
// convert to exclusive scan
if (tid < n)
d_out[tid] -= d_in[tid];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/* Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you)
*/
// Expected:
// min_logLum = -3.109206
// max_logLum = 2.265088
// num_bins = 1024
// bins = [1, 0, 0, ..., 9, 10, 9, 7, 11, 9, 15, 13, 12, 18, 16, 32, 29, ..]
// cdf = [1, 1, 1, 1, ..., 6, 8, 9, 10, 11, 12, 13, 16, 20 ,21 ,22, 31, ..]
const int threads = 128;
const int blocks = min(64, (int) ((numRows * numCols) + threads - 1) / threads);
float res, lumRange;
float *d_intermediate;
float *intermediate;
// 1) find the minimum and maximum value in the input logLuminance
// channel store in min_logLum and max_logLum
// allocate memory for intermediate values on the CPU and GPU
intermediate = (float *) malloc(sizeof(float) * blocks);
checkCudaErrors(cudaMalloc((void **) &d_intermediate, sizeof(float) * blocks));
// launch min reduction kernel
reduce_minmax<<<blocks, threads, sizeof(float) * threads>>>
(d_intermediate, d_logLuminance, numRows * numCols, false);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// copy the intermediate values back from the GPU to the CPU
checkCudaErrors(cudaMemcpy(intermediate, d_intermediate,
sizeof(float) * blocks, cudaMemcpyDeviceToHost));
// finish up on the CPU side
res = FLT_MAX;
for (int i = 0; i < blocks; i++)
res = min(res, intermediate[i]);
min_logLum = res;
// launch max kernel
reduce_minmax<<<blocks, threads, sizeof(float) * threads>>>
(d_intermediate, d_logLuminance, numRows * numCols, true);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// copy the intermediate values back from the GPU to the CPU
checkCudaErrors(cudaMemcpy(intermediate, d_intermediate,
sizeof(float) * blocks, cudaMemcpyDeviceToHost));
// finish up on the CPU side
res = -FLT_MAX;
for (int i = 0; i < blocks; i++)
res = max(res, intermediate[i]);
max_logLum = res;
printf("min_logLum = %f\n", min_logLum);
printf("max_logLum = %f\n", max_logLum);
// 2) subtract them to find the range
lumRange = max_logLum - min_logLum;
printf("lumRange = %f\n", lumRange);
// 3) generate a histogram of all the values in the logLuminance channel
// using the formula: bin = (lum[i] - lumMin) / lumRange * numBins
unsigned int *d_bins;
size_t histoSize = sizeof(unsigned int) * numBins;
// allocate memory for the bins on the device and initialize to zero
checkCudaErrors(cudaMalloc((void **) &d_bins, histoSize));
checkCudaErrors(cudaMemset(d_bins, 0, histoSize));
// launch histogram kernel
histogram<<<blocks, threads>>>(d_bins, numBins, d_logLuminance,
numRows * numCols, min_logLum, lumRange);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// 4) Perform an exclusive scan (prefix sum) on the histogram to get
// the cumulative distribution of luminance values (this should go in the
// incoming d_cdf pointer which already has been allocated for you)
hillis_steele_excl_scan<<<1, numBins>>>(d_cdf, d_bins, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// free memory
free(intermediate);
checkCudaErrors(cudaFree(d_intermediate));
checkCudaErrors(cudaFree(d_bins));
}
|
c9dc6dbce6da8db058c8682f1ae1a3da775a31cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_points.cuh"
#include <catboost/cuda/cuda_lib/cuda_base.h>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/cub/hipcub/hipcub.hpp>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
#include <catboost/cuda/cuda_util/kernel/update_part_props.cuh>
#include <catboost/cuda/cuda_util/kernel/reorder_one_bit.cuh>
#include <catboost/cuda/cuda_util/kernel/reorder_one_bit_impl.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel {
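// Device-side helpers used when splitting points between leaves: copy the
// per-leaf statistic columns, or gather them (plus the index column) through a
// permutation map, in place when the leaf is small enough to stage in shared
// memory and otherwise into a separate destination buffer.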
template <typename T>
__global__ void CopyInLeavesImpl(const ui32* leaves,
const TDataPartition* parts,
const T *src,
T *dst,
ui32 numStats,
ui64 lineSize) {
const ui32 leafId = leaves[blockIdx.y];
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
src += offset;
dst += offset;
while (i < size) {
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + i + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
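// Permutes one leaf in place: each block stages up to Size 4-byte elements
// (viewed as char4) of a single column in shared memory in gathered order and
// writes them back; blockIdx.x == 0 handles the index column, the remaining
// blocks handle the statistic columns.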
template <ui32 Size, ui32 BlockSize = 1024>
__global__ void GatherInplaceImpl(const ui32* leaf,
const TDataPartition* parts,
const ui32* map,
float* stats,
ui64 lineSize,
ui32* indices) {
__shared__ char4 tmp[Size];
char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize);
const ui32 leafId = leaf[blockIdx.y];
TDataPartition part = Ldg(parts + leafId);
const ui32 offset = part.Offset;
ui32 size = part.Size;
//should always be true btw, but may help the compiler
const ui32 tid = threadIdx.x;
map += offset;
data += offset;
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
const ui32 loadIdx = __ldg(map + i);
tmp[i] = __ldg(data + loadIdx);
}
}
__syncthreads();
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
WriteThrough(data + i, tmp[i]);
}
}
}
template <int Size>
void GatherInplaceLeqSize(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 1024;
dim3 numBlocks;
numBlocks.x = 1 + statCount;
numBlocks.y = leavesCount;
numBlocks.z = 1;
hipLaunchKernelGGL(( GatherInplaceImpl<Size, blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream, leaf, parts, map, stats, lineSize, indices);
}
template <ui32 Size, ui32 BlockSize = 1024>
__global__ void GatherInplaceSingleLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const ui32* map,
float* stats,
ui64 lineSize,
ui32* indices) {
__shared__ char4 tmp[Size];
char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize);
TDataPartition part = Ldg(parts + leafId);
const ui32 offset = part.Offset;
ui32 size = part.Size;
//should always be true btw, but may help the compiler
const ui32 tid = threadIdx.x;
data += offset;
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
const ui32 loadIdx = __ldg(map + i);
tmp[i] = __ldg(data + loadIdx);
}
}
__syncthreads();
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
WriteThrough(data + i, tmp[i]);
}
}
}
template <int Size>
void GatherInplaceSingleLeaf(const ui32 leaf,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 1024;
dim3 numBlocks;
numBlocks.x = 1 + statCount;
numBlocks.y = 1;
numBlocks.z = 1;
hipLaunchKernelGGL(( GatherInplaceSingleLeafImpl<Size, blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream, leaf, parts, map, stats, lineSize, indices);
}
/* this should be called before updatePartProps */
template <typename T>
__global__ void GatherInLeavesImpl(const ui32* leaves,
const TDataPartition* parts,
const T *src,
const ui32* map,
T *dst,
ui32 numStats,
ui64 lineSize) {
const ui32 leafId = leaves[blockIdx.y];
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
map += offset;
src += offset;
dst += offset;
while (i < size) {
const ui32 loadIdx = __ldg(map + i);
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void CopyInLeaves(const ui32* leaves, const ui32 leavesCount,
const TDataPartition* parts,
const T *src,
T *dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
hipLaunchKernelGGL(( CopyInLeavesImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, leaves, parts, src, dst, numStats, lineSize);
}
}
template <class T>
void GatherInLeaves(const ui32* leaves, const ui32 leavesCount,
const TDataPartition* parts,
const T *src,
const ui32* map,
T *dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
hipLaunchKernelGGL(( GatherInLeavesImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leaves, parts, src, map, dst, numStats, lineSize);
}
}
template <typename T>
__global__ void CopyLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const T* src,
T* dst,
ui32 numStats,
ui64 lineSize) {
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
src += offset;
while (i < size) {
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * size, __ldg(src + i + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void CopyLeaf(const ui32 leafId, const ui32 leafSize,
const TDataPartition* parts,
const T* src,
T* dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
hipLaunchKernelGGL(( CopyLeafImpl<T>), dim3(numBlocks), dim3(blockSize), 0, stream, leafId, parts, src, dst, numStats, lineSize);
}
}
/* this should be called before updatePartProps */
template <typename T>
__global__ void GatherLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const T* src,
const ui32* map,
T* dst,
ui32 numStats,
ui64 lineSize) {
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
dst += offset;
while (i < size) {
const ui32 loadIdx = __ldg(map + i);
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * size));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void GatherLeaf(const ui32 leafId, const ui32 leafSize,
const TDataPartition* parts,
const T* src,
const ui32* map,
T* dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
hipLaunchKernelGGL(( GatherLeafImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leafId, parts, src, map, dst, numStats, lineSize);
}
}
__global__ void UpdatePartitionsAfterSplitImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 leafCount,
const bool* sortedFlags,
TDataPartition* parts,
TDataPartition* partsCpu
) {
const ui32 leftLeaf = leftLeaves[blockIdx.y];
const ui32 rightLeaf = rightLeaves[blockIdx.y];
sortedFlags += parts[leftLeaf].Offset;
const ui32 partSize = parts[leftLeaf].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 offset = parts[leftLeaf].Offset;
while (i <= partSize) {
int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1;
int flag1 = i ? Ldg(sortedFlags + i - 1) : 0;
if (flag0 != flag1) {
//we are on border
TDataPartition leftPart = parts[leftLeaf];
leftPart.Size = i;
parts[leftLeaf] = leftPart;
partsCpu[leftLeaf] = leftPart;
TDataPartition rightPart = parts[rightLeaf];
rightPart.Offset = offset + i;
rightPart.Size = partSize - i;
parts[rightLeaf] = rightPart;
partsCpu[rightLeaf] = rightPart;
break;
}
i += blockDim.x * gridDim.x;
}
}
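// illustrative example of the border search above: with partSize = 6 and
// sortedFlags = [0, 0, 1, 1, 1, 1], the first i where flag0 != flag1 is i = 2, so the left
// child keeps [offset, offset + 2) and the right child gets [offset + 2, offset + 6);
// the i <= partSize loop bound covers the degenerate case where every flag is 0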
void UpdatePartitionsAfterSplit(const ui32* leftLeafs,
const ui32* rightLeafs,
ui32 leavesCount,
const bool* sortedFlag,
TDataPartition* parts,
TDataPartition* partsCpu,
TCudaStream stream) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
hipLaunchKernelGGL(( UpdatePartitionsAfterSplitImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeafs, rightLeafs, leavesCount, sortedFlag, parts, partsCpu);
}
}
__global__ void UpdatePartitionAfterSplitImpl(const ui32 leftLeaf,
const ui32 rightLeaf,
const bool* sortedFlags,
TDataPartition* parts,
TDataPartition* partsCpu
) {
const ui32 partSize = parts[leftLeaf].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 offset = parts[leftLeaf].Offset;
while (i <= partSize) {
int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1;
int flag1 = i ? Ldg(sortedFlags + i - 1) : 0;
if (flag0 != flag1) {
//we are on border
TDataPartition leftPart = parts[leftLeaf];
leftPart.Size = i;
partsCpu[leftLeaf] = leftPart;
parts[leftLeaf] = leftPart;
TDataPartition rightPart = parts[rightLeaf];
rightPart.Offset = offset + i;
rightPart.Size = partSize - i;
partsCpu[rightLeaf] = rightPart;
parts[rightLeaf] = rightPart;
break;
}
i += blockDim.x * gridDim.x;
}
}
void UpdatePartitionAfterSplit(const ui32 leftLeaf,
const ui32 rightLeaf,
ui32 leafSize,
const bool* sortedFlag,
TDataPartition* parts,
TDataPartition* partsCpu,
TCudaStream stream) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
hipLaunchKernelGGL(( UpdatePartitionAfterSplitImpl), dim3(numBlocks), dim3(blockSize), 0, stream, leftLeaf, rightLeaf, sortedFlag, parts, partsCpu);
}
}
/*
* blockIdx.x * BlockSize * N + threadIdx.x is the starting index within the leaf (each thread handles N elements, BlockSize apart)
* blockIdx.y is the part (leaf) number
* this is not a time-critical kernel, so we launch several blocks per SM for each leaf and simply skip computations when out of range
*/
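// illustrative mapping, assuming blockSize = 512 and N = 4 as configured in the launcher below:
// thread t of block (bx, by) starts at element bx * 2048 + t of leaf leafIds[by], handles the
// four elements at offsets t, t + 512, t + 1024, t + 1536 of that 2048-wide tile, and then
// advances by 2048 * gridDim.x until the leaf is exhausted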
template <int N, int BlockSize>
__global__ void SplitAndMakeSequenceInLeavesImpl(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32* leafIds,
const TCFeature* splitFeatures,
const ui32* splitBins,
bool* splitFlags,
ui32* indices) {
const ui32 leafId = leafIds[blockIdx.y];
TDataPartition part = Ldg(parts + leafId);
const i32 size = part.Size;
const i32 offset = part.Offset;
loadIndices += offset;
indices += offset;
splitFlags += offset;
int i = blockIdx.x * BlockSize * N + threadIdx.x;
if (i >= size) {
return;
}
TCFeature feature = splitFeatures[blockIdx.y];
const ui32 binIdx = splitBins[blockIdx.y];
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
const bool oneHot = feature.OneHotFeature;
compressedIndex += feature.Offset;
while (i < size) {
ui32 loadIndex[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
loadIndex[k] = loadIndices ? __ldg(loadIndices + i + k * BlockSize) : i + k * BlockSize;
}
}
ui32 featureVal[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask;
}
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize));
}
}
bool split[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
split[k] = (oneHot ? (featureVal[k] == value) : featureVal[k] > value);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(splitFlags + i + k * BlockSize, split[k]);
}
}
i += N * BlockSize * gridDim.x;
}
}
void SplitAndMakeSequenceInLeaves(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32* leafIds,
ui32 leavesCount,
const TCFeature* splitFeatures,
const ui32* splitBins,
bool* splitFlags,
ui32* indices,
TCudaStream stream) {
if (leavesCount) {
const ui32 blockSize = 512;
const int N = 4;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
hipLaunchKernelGGL(( SplitAndMakeSequenceInLeavesImpl<N, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, compressedIndex, loadIndices, parts, leafIds, splitFeatures, splitBins, splitFlags, indices);
}
}
template <int N, int BlockSize>
__global__ void SplitAndMakeSequenceInSingleLeafImpl(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32 leafId,
const TCFeature feature,
const ui32 binIdx,
bool* splitFlags,
ui32* indices) {
TDataPartition part = Ldg(parts + leafId);
const i32 size = part.Size;
const i32 offset = part.Offset;
loadIndices += offset;
const int i = blockIdx.x * BlockSize * N + threadIdx.x;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
const bool oneHot = feature.OneHotFeature;
compressedIndex += feature.Offset;
ui32 loadIndex[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
loadIndex[k] = __ldg(loadIndices + i + k * BlockSize);
}
}
ui32 featureVal[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask;
}
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize));
}
}
bool split[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
split[k] = (oneHot ? (featureVal[k] == value) : featureVal[k] > value);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(splitFlags + i + k * BlockSize, split[k]);
}
}
}
void SplitAndMakeSequenceInLeaf(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
ui32 leafId,
ui32 leafSize,
TCFeature splitFeature,
ui32 splitBin,
bool* splitFlags,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 256;
const int N = 2;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize * N - 1) / (blockSize * N);
numBlocks.y = 1;
numBlocks.z = 1;
if (numBlocks.x) {
SplitAndMakeSequenceInSingleLeafImpl<N, blockSize> << < numBlocks, blockSize, 0, stream >> > (compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices);
}
}
//TODO(noxoomo): cub sucks for this, write proper segmented version
void SortByFlagsInLeaves(const ui32* leavesToSplit, const ui32 leafCount,
const TDataPartition* partsCpu,
TSplitPointsContext& context,
TCudaStream stream) {
/*
* Sort leaves by flags
*/
for (ui32 i = 0; i < leafCount; ++i) {
const ui32 leafId = leavesToSplit[i];
TDataPartition part = partsCpu[leafId];
const bool* flagsSrc = context.TempFlags.Get() + part.Offset;
bool* flagsDst = context.Flags.Get() + part.Offset;
const ui32* indicesSrc = context.TempIndices.Get() + part.Offset;
ui32* indicesDst = context.Indices.Get() + part.Offset;
hipError_t error = hipcub::DeviceRadixSort::SortPairs<bool, ui32>((void*)context.TempStorage.Get(),
context.TempStorageSizes[i],
flagsSrc,
flagsDst,
indicesSrc,
indicesDst,
(int)part.Size,
0,
1,
stream);
CUDA_SAFE_CALL(error);
}
}
//
void SortWithoutCub(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) {
TDataPartition part = partsCpu[leafId];
if (part.Size) {
const bool* flagsSrc = context.TempFlags.Get();
bool* flagsDst = context.Flags.Get();
const ui32* indicesSrc = context.TempIndices.Get();
ui32* indicesDst = context.Indices.Get();
char* tempStorage = context.TempStorage.Get();
const ui64 tempOffsetsSize = sizeof(int) * part.Size;
{
using TInput = TScanBitIterator<bool>;
TInput inputIter(context.TempFlags.Get(), 0);
ui64 tempStorageSize = tempStorage ? context.TempStorageSizes[0] - tempOffsetsSize : 0;
auto scanTmp = tempStorage ? (void*)(tempStorage + tempOffsetsSize) : nullptr;
hipError_t err = hipcub::DeviceScan::ExclusiveSum < TInput, int*> (scanTmp,
tempStorageSize,
inputIter,
(int*)tempStorage,
part.Size,
stream);
if (!tempStorage) {
context.TempStorageSizes[0] = tempStorageSize + tempOffsetsSize;
}
CUDA_SAFE_CALL(err);
}
if (tempStorage) {
const int blockSize = 512;
const int N = 1;
const int numBlocks = (part.Size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<bool, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
flagsSrc,
indicesSrc,
(int*) tempStorage,
0,
flagsDst,
indicesDst,
part.Size);
}
}
}
ui32 FastSortSize() {
return 500000;
}
void SortByFlagsInLeaf(ui32 leafId,
const TDataPartition* partsCpu,
TSplitPointsContext& context,
TCudaStream stream) {
/*
* Sort leaves by flags
*/
TDataPartition part = partsCpu[leafId];
if (part.Size > FastSortSize()) {
const bool* flagsSrc = context.TempFlags.Get();
bool* flagsDst = context.Flags.Get();
const ui32* indicesSrc = context.TempIndices.Get();
ui32* indicesDst = context.Indices.Get();
hipError_t error = hipcub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(),
context.TempStorageSizes[0],
flagsSrc,
flagsDst,
indicesSrc,
indicesDst,
(int) part.Size,
0,
1,
stream);
CUDA_SAFE_CALL(error);
} else {
SortWithoutCub(leafId, partsCpu, context, stream);
}
}
#define TEMPL_INST(Type)\
template void CopyInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);\
template void GatherInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\
template void GatherLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\
template void CopyLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);
TEMPL_INST(ui32)
TEMPL_INST(float)
#undef TEMPL_INST
template void GatherInplaceLeqSize<12288>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<6144>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<3072>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<1024>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
#define INPLACE_SINGLE_LEAF(Size)\
template void GatherInplaceSingleLeaf<Size>(const ui32 leaf, \
const TDataPartition* parts,\
const ui32* map,\
float* stats, ui32 statCount,\
ui64 lineSize,\
ui32* indices,\
TCudaStream stream);
INPLACE_SINGLE_LEAF(6144)
INPLACE_SINGLE_LEAF(12288)
INPLACE_SINGLE_LEAF(3072)
INPLACE_SINGLE_LEAF(1024)
}
| c9dc6dbce6da8db058c8682f1ae1a3da775a31cd.cu | #include "split_points.cuh"
#include <catboost/cuda/cuda_lib/cuda_base.h>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <contrib/libs/cub/cub/device/device_radix_sort.cuh>
#include <catboost/cuda/cuda_util/gpu_data/partitions.h>
#include <catboost/cuda/cuda_util/kernel/update_part_props.cuh>
#include <catboost/cuda/cuda_util/kernel/reorder_one_bit.cuh>
#include <catboost/cuda/cuda_util/kernel/reorder_one_bit_impl.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel {
template <typename T>
__global__ void CopyInLeavesImpl(const ui32* leaves,
const TDataPartition* parts,
const T *src,
T *dst,
ui32 numStats,
ui64 lineSize) {
const ui32 leafId = leaves[blockIdx.y];
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
src += offset;
dst += offset;
while (i < size) {
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + i + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
template <ui32 Size, ui32 BlockSize = 1024>
__global__ void GatherInplaceImpl(const ui32* leaf,
const TDataPartition* parts,
const ui32* map,
float* stats,
ui64 lineSize,
ui32* indices) {
__shared__ char4 tmp[Size];
char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize);
const ui32 leafId = leaf[blockIdx.y];
TDataPartition part = Ldg(parts + leafId);
const ui32 offset = part.Offset;
ui32 size = part.Size;
//should always be true btw, but may help the compiler
const ui32 tid = threadIdx.x;
map += offset;
data += offset;
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
const ui32 loadIdx = __ldg(map + i);
tmp[i] = __ldg(data + loadIdx);
}
}
__syncthreads();
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
WriteThrough(data + i, tmp[i]);
}
}
}
template <int Size>
void GatherInplaceLeqSize(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 1024;
dim3 numBlocks;
numBlocks.x = 1 + statCount;
numBlocks.y = leavesCount;
numBlocks.z = 1;
GatherInplaceImpl<Size, blockSize> <<<numBlocks, blockSize, 0, stream>>>(leaf, parts, map, stats, lineSize, indices);
}
template <ui32 Size, ui32 BlockSize = 1024>
__global__ void GatherInplaceSingleLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const ui32* map,
float* stats,
ui64 lineSize,
ui32* indices) {
__shared__ char4 tmp[Size];
char4* data = blockIdx.x == 0 ? (char4*)indices : (char4*)(stats + (blockIdx.x - 1) * lineSize);
TDataPartition part = Ldg(parts + leafId);
const ui32 offset = part.Offset;
ui32 size = part.Size;
//should always be true btw, but may help the compiler
const ui32 tid = threadIdx.x;
data += offset;
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
const ui32 loadIdx = __ldg(map + i);
tmp[i] = __ldg(data + loadIdx);
}
}
__syncthreads();
#pragma unroll
for (ui32 i = tid; i < Size; i += BlockSize) {
if (i < size) {
WriteThrough(data + i, tmp[i]);
}
}
}
template <int Size>
void GatherInplaceSingleLeaf(const ui32 leaf,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 1024;
dim3 numBlocks;
numBlocks.x = 1 + statCount;
numBlocks.y = 1;
numBlocks.z = 1;
GatherInplaceSingleLeafImpl<Size, blockSize> <<<numBlocks, blockSize, 0, stream>>>(leaf, parts, map, stats, lineSize, indices);
}
/* this should be called before updatePartProps */
template <typename T>
__global__ void GatherInLeavesImpl(const ui32* leaves,
const TDataPartition* parts,
const T *src,
const ui32* map,
T *dst,
ui32 numStats,
ui64 lineSize) {
const ui32 leafId = leaves[blockIdx.y];
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
map += offset;
src += offset;
dst += offset;
while (i < size) {
const ui32 loadIdx = __ldg(map + i);
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void CopyInLeaves(const ui32* leaves, const ui32 leavesCount,
const TDataPartition* parts,
const T *src,
T *dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
CopyInLeavesImpl<T><<<numBlocks, blockSize, 0, stream>>>(leaves, parts, src, dst, numStats, lineSize);
}
}
template <class T>
void GatherInLeaves(const ui32* leaves, const ui32 leavesCount,
const TDataPartition* parts,
const T *src,
const ui32* map,
T *dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
GatherInLeavesImpl<<<numBlocks, blockSize, 0, stream>>>(leaves, parts, src, map, dst, numStats, lineSize);
}
}
template <typename T>
__global__ void CopyLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const T* src,
T* dst,
ui32 numStats,
ui64 lineSize) {
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
src += offset;
while (i < size) {
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * size, __ldg(src + i + k * lineSize));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void CopyLeaf(const ui32 leafId, const ui32 leafSize,
const TDataPartition* parts,
const T* src,
T* dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
CopyLeafImpl<T><<<numBlocks, blockSize, 0, stream>>>(leafId, parts, src, dst, numStats, lineSize);
}
}
/* this should be called before updatePartProps */
template <typename T>
__global__ void GatherLeafImpl(const ui32 leafId,
const TDataPartition* parts,
const T* src,
const ui32* map,
T* dst,
ui32 numStats,
ui64 lineSize) {
const ui32 offset = parts[leafId].Offset;
const ui32 size = parts[leafId].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
dst += offset;
while (i < size) {
const ui32 loadIdx = __ldg(map + i);
#pragma unroll 8
for (int k = 0; k < numStats; ++k) {
WriteThrough(dst + i + k * lineSize, __ldg(src + loadIdx + k * size));
}
i += gridDim.x * blockDim.x;
}
}
template <class T>
void GatherLeaf(const ui32 leafId, const ui32 leafSize,
const TDataPartition* parts,
const T* src,
const ui32* map,
T* dst,
ui32 numStats,
ui32 lineSize,
TCudaStream stream) {
const ui32 blockSize = 256;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
GatherLeafImpl<<<numBlocks, blockSize, 0, stream>>>(leafId, parts, src, map, dst, numStats, lineSize);
}
}
__global__ void UpdatePartitionsAfterSplitImpl(const ui32* leftLeaves,
const ui32* rightLeaves,
ui32 leafCount,
const bool* sortedFlags,
TDataPartition* parts,
TDataPartition* partsCpu
) {
const ui32 leftLeaf = leftLeaves[blockIdx.y];
const ui32 rightLeaf = rightLeaves[blockIdx.y];
sortedFlags += parts[leftLeaf].Offset;
const ui32 partSize = parts[leftLeaf].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 offset = parts[leftLeaf].Offset;
while (i <= partSize) {
int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1;
int flag1 = i ? Ldg(sortedFlags + i - 1) : 0;
if (flag0 != flag1) {
//we are on border
TDataPartition leftPart = parts[leftLeaf];
leftPart.Size = i;
parts[leftLeaf] = leftPart;
partsCpu[leftLeaf] = leftPart;
TDataPartition rightPart = parts[rightLeaf];
rightPart.Offset = offset + i;
rightPart.Size = partSize - i;
parts[rightLeaf] = rightPart;
partsCpu[rightLeaf] = rightPart;
break;
}
i += blockDim.x * gridDim.x;
}
}
void UpdatePartitionsAfterSplit(const ui32* leftLeafs,
const ui32* rightLeafs,
ui32 leavesCount,
const bool* sortedFlag,
TDataPartition* parts,
TDataPartition* partsCpu,
TCudaStream stream) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
if (leavesCount) {
UpdatePartitionsAfterSplitImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeafs, rightLeafs, leavesCount, sortedFlag, parts, partsCpu);
}
}
__global__ void UpdatePartitionAfterSplitImpl(const ui32 leftLeaf,
const ui32 rightLeaf,
const bool* sortedFlags,
TDataPartition* parts,
TDataPartition* partsCpu
) {
const ui32 partSize = parts[leftLeaf].Size;
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const ui32 offset = parts[leftLeaf].Offset;
while (i <= partSize) {
int flag0 = i < partSize ? Ldg(sortedFlags + i) : 1;
int flag1 = i ? Ldg(sortedFlags + i - 1) : 0;
if (flag0 != flag1) {
//we are on border
TDataPartition leftPart = parts[leftLeaf];
leftPart.Size = i;
partsCpu[leftLeaf] = leftPart;
parts[leftLeaf] = leftPart;
TDataPartition rightPart = parts[rightLeaf];
rightPart.Offset = offset + i;
rightPart.Size = partSize - i;
partsCpu[rightLeaf] = rightPart;
parts[rightLeaf] = rightPart;
break;
}
i += blockDim.x * gridDim.x;
}
}
void UpdatePartitionAfterSplit(const ui32 leftLeaf,
const ui32 rightLeaf,
ui32 leafSize,
const bool* sortedFlag,
TDataPartition* parts,
TDataPartition* partsCpu,
TCudaStream stream) {
const ui32 blockSize = 512;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize - 1) / blockSize;
numBlocks.y = 1;
numBlocks.z = 1;
if (leafSize) {
UpdatePartitionAfterSplitImpl<<<numBlocks, blockSize, 0, stream>>>(leftLeaf, rightLeaf, sortedFlag, parts, partsCpu);
}
}
/*
* blockIdx.x * BlockSize * N + threadIdx.x is the starting index within the leaf (each thread handles N elements, BlockSize apart)
* blockIdx.y is the part (leaf) number
* this is not a time-critical kernel, so we launch several blocks per SM for each leaf and simply skip computations when out of range
*/
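// illustrative mapping, assuming blockSize = 512 and N = 4 as configured in the launcher below:
// each block covers a contiguous tile of N * BlockSize = 2048 leaf elements and strides forward
// by 2048 * gridDim.x; blockIdx.y selects which leaf (and which split feature) is processed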
template <int N, int BlockSize>
__global__ void SplitAndMakeSequenceInLeavesImpl(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32* leafIds,
const TCFeature* splitFeatures,
const ui32* splitBins,
bool* splitFlags,
ui32* indices) {
const ui32 leafId = leafIds[blockIdx.y];
TDataPartition part = Ldg(parts + leafId);
const i32 size = part.Size;
const i32 offset = part.Offset;
loadIndices += offset;
indices += offset;
splitFlags += offset;
int i = blockIdx.x * BlockSize * N + threadIdx.x;
if (i >= size) {
return;
}
TCFeature feature = splitFeatures[blockIdx.y];
const ui32 binIdx = splitBins[blockIdx.y];
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
const bool oneHot = feature.OneHotFeature;
compressedIndex += feature.Offset;
while (i < size) {
ui32 loadIndex[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
loadIndex[k] = loadIndices ? __ldg(loadIndices + i + k * BlockSize) : i + k * BlockSize;
}
}
ui32 featureVal[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask;
}
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize));
}
}
bool split[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
split[k] = (oneHot ? (featureVal[k] == value) : featureVal[k] > value);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(splitFlags + i + k * BlockSize, split[k]);
}
}
i += N * BlockSize * gridDim.x;
}
}
void SplitAndMakeSequenceInLeaves(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32* leafIds,
ui32 leavesCount,
const TCFeature* splitFeatures,
const ui32* splitBins,
bool* splitFlags,
ui32* indices,
TCudaStream stream) {
if (leavesCount) {
const ui32 blockSize = 512;
const int N = 4;
dim3 numBlocks;
numBlocks.x = (leavesCount > 4 ? 2 : 4) * TArchProps::SMCount();
numBlocks.y = leavesCount;
numBlocks.z = 1;
SplitAndMakeSequenceInLeavesImpl<N, blockSize><<<numBlocks, blockSize, 0, stream>>>(compressedIndex, loadIndices, parts, leafIds, splitFeatures, splitBins, splitFlags, indices);
}
}
template <int N, int BlockSize>
__global__ void SplitAndMakeSequenceInSingleLeafImpl(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
const ui32 leafId,
const TCFeature feature,
const ui32 binIdx,
bool* splitFlags,
ui32* indices) {
TDataPartition part = Ldg(parts + leafId);
const i32 size = part.Size;
const i32 offset = part.Offset;
loadIndices += offset;
const int i = blockIdx.x * BlockSize * N + threadIdx.x;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
const bool oneHot = feature.OneHotFeature;
compressedIndex += feature.Offset;
ui32 loadIndex[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
loadIndex[k] = __ldg(loadIndices + i + k * BlockSize);
}
}
ui32 featureVal[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
featureVal[k] = __ldg(compressedIndex + loadIndex[k]) & mask;
}
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(indices + i + k * BlockSize, static_cast<ui32>(i + k * BlockSize));
}
}
bool split[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
split[k] = (oneHot ? (featureVal[k] == value) : featureVal[k] > value);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
if (i + k * BlockSize < size) {
WriteThrough(splitFlags + i + k * BlockSize, split[k]);
}
}
}
void SplitAndMakeSequenceInLeaf(const ui32* compressedIndex,
const ui32* loadIndices,
const TDataPartition* parts,
ui32 leafId,
ui32 leafSize,
TCFeature splitFeature,
ui32 splitBin,
bool* splitFlags,
ui32* indices,
TCudaStream stream) {
const ui32 blockSize = 256;
const int N = 2;
dim3 numBlocks;
numBlocks.x = (leafSize + blockSize * N - 1) / (blockSize * N);
numBlocks.y = 1;
numBlocks.z = 1;
if (numBlocks.x) {
SplitAndMakeSequenceInSingleLeafImpl<N, blockSize> << < numBlocks, blockSize, 0, stream >> > (compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices);
}
}
//TODO(noxoomo): cub sucks for this, write proper segmented version
void SortByFlagsInLeaves(const ui32* leavesToSplit, const ui32 leafCount,
const TDataPartition* partsCpu,
TSplitPointsContext& context,
TCudaStream stream) {
/*
* Sort leaves by flags
*/
for (ui32 i = 0; i < leafCount; ++i) {
const ui32 leafId = leavesToSplit[i];
TDataPartition part = partsCpu[leafId];
const bool* flagsSrc = context.TempFlags.Get() + part.Offset;
bool* flagsDst = context.Flags.Get() + part.Offset;
const ui32* indicesSrc = context.TempIndices.Get() + part.Offset;
ui32* indicesDst = context.Indices.Get() + part.Offset;
cudaError_t error = cub::DeviceRadixSort::SortPairs<bool, ui32>((void*)context.TempStorage.Get(),
context.TempStorageSizes[i],
flagsSrc,
flagsDst,
indicesSrc,
indicesDst,
(int)part.Size,
0,
1,
stream);
CUDA_SAFE_CALL(error);
}
}
//
void SortWithoutCub(ui32 leafId, const TDataPartition* partsCpu, TSplitPointsContext& context, TCudaStream stream) {
TDataPartition part = partsCpu[leafId];
if (part.Size) {
const bool* flagsSrc = context.TempFlags.Get();
bool* flagsDst = context.Flags.Get();
const ui32* indicesSrc = context.TempIndices.Get();
ui32* indicesDst = context.Indices.Get();
char* tempStorage = context.TempStorage.Get();
const ui64 tempOffsetsSize = sizeof(int) * part.Size;
{
using TInput = TScanBitIterator<bool>;
TInput inputIter(context.TempFlags.Get(), 0);
ui64 tempStorageSize = tempStorage ? context.TempStorageSizes[0] - tempOffsetsSize : 0;
auto scanTmp = tempStorage ? (void*)(tempStorage + tempOffsetsSize) : nullptr;
cudaError_t err = cub::DeviceScan::ExclusiveSum < TInput, int*> (scanTmp,
tempStorageSize,
inputIter,
(int*)tempStorage,
part.Size,
stream);
if (!tempStorage) {
context.TempStorageSizes[0] = tempStorageSize + tempOffsetsSize;
}
CUDA_SAFE_CALL(err);
}
if (tempStorage) {
const int blockSize = 512;
const int N = 1;
const int numBlocks = (part.Size + (N * blockSize) - 1) / (N * blockSize);
ReorderOneBitImpl<bool, ui32, N, blockSize> << < numBlocks, blockSize, 0, stream >> > (
flagsSrc,
indicesSrc,
(int*) tempStorage,
0,
flagsDst,
indicesDst,
part.Size);
}
}
}
ui32 FastSortSize() {
return 500000;
}
void SortByFlagsInLeaf(ui32 leafId,
const TDataPartition* partsCpu,
TSplitPointsContext& context,
TCudaStream stream) {
/*
* Sort leaves by flags
*/
TDataPartition part = partsCpu[leafId];
if (part.Size > FastSortSize()) {
const bool* flagsSrc = context.TempFlags.Get();
bool* flagsDst = context.Flags.Get();
const ui32* indicesSrc = context.TempIndices.Get();
ui32* indicesDst = context.Indices.Get();
cudaError_t error = cub::DeviceRadixSort::SortPairs < bool, ui32 > ((void*) context.TempStorage.Get(),
context.TempStorageSizes[0],
flagsSrc,
flagsDst,
indicesSrc,
indicesDst,
(int) part.Size,
0,
1,
stream);
CUDA_SAFE_CALL(error);
} else {
SortWithoutCub(leafId, partsCpu, context, stream);
}
}
#define TEMPL_INST(Type)\
template void CopyInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);\
template void GatherInLeaves<Type>(const ui32* leaves, const ui32 leavesCount, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\
template void GatherLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type* src, const ui32* map, Type *dst, ui32 numStats, ui32 lineSize, TCudaStream stream);\
template void CopyLeaf<Type>(const ui32 leaf, const ui32 size, const TDataPartition* parts, const Type *src, Type *dst, ui32 numCopies, ui32 lineSize, TCudaStream stream);
TEMPL_INST(ui32)
TEMPL_INST(float)
#undef TEMPL_INST
template void GatherInplaceLeqSize<12288>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<6144>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<3072>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
template void GatherInplaceLeqSize<1024>(const ui32* leaf, ui32 leavesCount,
const TDataPartition* parts,
const ui32* map,
float* stats, ui32 statCount,
ui64 lineSize,
ui32* indices,
TCudaStream stream);
#define INPLACE_SINGLE_LEAF(Size)\
template void GatherInplaceSingleLeaf<Size>(const ui32 leaf, \
const TDataPartition* parts,\
const ui32* map,\
float* stats, ui32 statCount,\
ui64 lineSize,\
ui32* indices,\
TCudaStream stream);
INPLACE_SINGLE_LEAF(6144)
INPLACE_SINGLE_LEAF(12288)
INPLACE_SINGLE_LEAF(3072)
INPLACE_SINGLE_LEAF(1024)
}
|
3507433bad4e8c282b549939be7bc02244675306.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace cudev
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
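// the four fetches above form the usual integral-image box sum
// S = I(x1, y1) - I(x1, y2) - I(x2, y1) + I(x2, y2), so each weighted box costs a constant
// four reads regardless of its size; dividing by (dx2 - dx1) * (dy2 - dy1) normalizes by area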
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
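// illustrative values: octave 0 uses filter sizes 9, 15, 21, 27 for layers 0..3, and every
// further octave doubles them (octave 1: 18, 30, 42, 54), i.e. (9 + 6 * layer) << octave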
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
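// note: the 0.81f above is 0.9f squared -- the weight commonly used in SURF to balance the
// Dxy box-filter response against Dxx * Dyy in the approximated Hessian determinant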
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
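// since c_DM weights the box by 1 / area, d is roughly the fraction of the feature's scaled
// 9x9 support that lies inside the (0/1) mask, so d >= 0.5f keeps features whose support is
// at least about half covered by the mask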
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
hipLaunchKernelGGL(( icvFindMaximaInLayer<WithMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter);
else
hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
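// the solve above is essentially one Newton step of a quadratic fit: N9 holds the determinant
// response in a 3x3x3 (x, y, scale) neighbourhood, H its second differences and dD the negated
// central-difference gradient, so x approximates the sub-pixel / sub-scale offset of the
// extremum; only offsets with every |x[k]| <= 1.0f are accepted by the check above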
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
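// c_NX / c_NY describe the two boxes of the horizontal / vertical Haar wavelets
// on a 4x4 base: each row is {x1, y1, x2, y2, weight}, rescaled to grad_wav_size
// inside icvCalcHaarPatternSum.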
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We keep the gradient wavelet size even so that the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// bail out when grad_wav_size does not fit inside the image
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
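// Slide a 60-degree window (ORI_WIN) around the circle in steps of
// ORI_SEARCH_INC = 5 degrees: 18 iterations x 4 thread rows cover all 72
// directions, and the window with the largest summed gradient vector wins.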
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
plus<float> op;
cudev::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
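// c_DW is a precomputed 20x20 weighting table (Gaussian-shaped, peaking at the
// patch center) applied to the Haar responses of the descriptor patch.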
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
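// WinReader samples imgTex on a grid centered on the keypoint and rotated by its
// orientation (cos_dir / sin_dir), so the descriptor patch is extracted in a
// rotation-invariant frame.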
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;
float centerY;
float win_offset;
float cos_dir;
float sin_dir;
int width;
int height;
};
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{
__shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
dx = dy = 0.0f;
WinReader win;
win.centerX = featureX[blockIdx.x];
win.centerY = featureY[blockIdx.x];
// The sampling intervals and wavelet sizes for selecting an orientation
// and building the keypoint descriptor are defined relative to 's'
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
// Extract a (PATCH_SZ + 1) x (PATCH_SZ + 1) sample window (a patch of roughly 20s x 20s pixels) around the keypoint
const int win_size = (int)((PATCH_SZ + 1) * s);
win.width = win.height = win_size;
// Nearest neighbour version (faster)
win.win_offset = -(win_size - 1.0f) / 2.0f;
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
descriptor_dir = 0.f;
descriptor_dir *= CV_PI_F / 180.0f;
sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int xLoadInd = tid % (PATCH_SZ + 1);
const int yLoadInd = tid / (PATCH_SZ + 1);
if (yLoadInd < (PATCH_SZ + 1))
{
if (s > 1)
{
AreaFilter<WinReader> filter(win, s, s);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
}
else
{
LinearFilter<WinReader> filter(win);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
}
}
__syncthreads();
const int xPatchInd = threadIdx.x % 5;
const int yPatchInd = threadIdx.x / 5;
if (yPatchInd < 5)
{
const int xBlockInd = threadIdx.y % 4;
const int yBlockInd = threadIdx.y / 4;
const int xInd = xBlockInd * 5 + xPatchInd;
const int yInd = yBlockInd * 5 + yPatchInd;
const float dw = c_DW[yInd * PATCH_SZ + xInd];
dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
}
}
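// One block per keypoint: the 16 thread rows map to the 4x4 sub-regions of the
// patch, and each row reduces the 25 (5x5) Haar responses of its sub-region to
// (sum dx, sum dy, sum |dx|, sum |dy|), giving the 64-element descriptor.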
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
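// Extended 128-element descriptor: the dx sums are split by the sign of dy and
// the dy sums by the sign of dx, doubling the number of entries per sub-region.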
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
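// L2-normalize each descriptor: one block per keypoint reduces the squared
// elements to obtain the length and then divides every element by it.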
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
__shared__ float smem[BLOCK_DIM_X];
__shared__ float s_len;
// no need for thread ID
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
const float val = descriptor_base[threadIdx.x];
float len = val * val;
reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
if (threadIdx.x == 0)
s_len = ::sqrtf(len);
__syncthreads();
// normalize and store in output
descriptor_base[threadIdx.x] = val / s_len;
}
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
// compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
if (descriptors.cols == 64)
{
hipLaunchKernelGGL(( compute_descriptors_64), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(nFeatures), dim3(64), 0, 0, (PtrStepSzf) descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
else
{
hipLaunchKernelGGL(( compute_descriptors_128), dim3(nFeatures), dim3(dim3(32, 16)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(nFeatures), dim3(128), 0, 0, (PtrStepSzf) descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
| 3507433bad4e8c282b549939be7bc02244675306.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_GPU
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace surf
{
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold);
void loadOctaveConstants(int octave, int layer_rows, int layer_cols);
void bindImgTex(PtrStepSzb img);
size_t bindSumTex(PtrStepSz<unsigned int> sum);
size_t bindMaskSumTex(PtrStepSz<unsigned int> maskSum);
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayer);
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nLayers);
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter);
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures);
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures);
}
}}}
namespace cv { namespace gpu { namespace cudev
{
namespace surf
{
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
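// Evaluate an N-box Haar pattern at (x, y): each box sum is obtained from four
// integral-image reads (inclusion/exclusion), normalized by the box area and
// multiplied by its weight.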
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 200
typedef double real_t;
#else
typedef float real_t;
#endif
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
return (HAAR_SIZE0 + HAAR_SIZE_INC * layer) << octave;
}
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int j = threadIdx.x + blockIdx.x * blockDim.x;
const int i = threadIdx.y + blockIdx_y * blockDim.y;
const int layer = blockIdx_z;
const int size = calcSize(c_octave, layer);
const int samples_i = 1 + ((c_img_rows - size) >> c_octave);
const int samples_j = 1 + ((c_img_cols - size) >> c_octave);
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave;
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j)
{
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), (j << c_octave));
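// det(H_approx) = Dxx * Dyy - (0.9 * Dxy)^2; the 0.81 factor is the squared 0.9
// weight that compensates for the box-filter approximation of the Hessian.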
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers)
{
const int min_size = calcSize(octave, 0);
const int max_samples_i = 1 + ((img_rows - min_size) >> octave);
const int max_samples_j = 1 + ((img_cols - min_size) >> octave);
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
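// WithMask accepts a candidate only if the mean of the mask integral image over
// the wavelet support is at least 0.5, i.e. the keypoint lies mostly inside the
// user-supplied mask.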
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
extern __shared__ float N9[];
// Determine the layer and the block position within it from the flattened grid index.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y;
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1;
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1;
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
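// N9 now holds a blockDim.x x blockDim.y x 3 slab of det responses (layers
// layer-1 .. layer+1), so each interior thread can test its 26 neighbours
// without further global memory reads.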
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
#endif
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
if (use_mask)
icvFindMaximaInLayer<WithMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
else
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
#if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
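// Solve H * x = dD for the sub-pixel offset in (x, y, scale): H is the 3x3
// Hessian of the det-of-Hessian response and dD its negated gradient.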
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We keep the gradient wavelet size even so that the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// discard the keypoint when grad_wav_size does not fit inside the image
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
#endif
}
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
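// c_NX / c_NY describe the two boxes of the horizontal / vertical Haar wavelets
// on a 4x4 base: each row is {x1, y1, x2, y2, weight}, rescaled to grad_wav_size
// inside icvCalcHaarPatternSum.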
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We keep the gradient wavelet size even so that the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// bail out when grad_wav_size does not fit inside the image
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
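// Slide a 60-degree window (ORI_WIN) around the circle in steps of
// ORI_SEARCH_INC = 5 degrees: 18 iterations x 4 thread rows cover all 72
// directions, and the window with the largest summed gradient vector wins.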
#if __CUDA_ARCH__ >= 200
#pragma unroll
#endif
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
plus<float> op;
cudev::reduce<32>(smem_tuple(s_sumx + threadIdx.y * 32, s_sumy + threadIdx.y * 32),
thrust::tie(sumx, sumy), threadIdx.x, thrust::make_tuple(op, op));
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabsf(kp_dir - 360.f) < numeric_limits<float>::epsilon())
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
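// c_DW is a precomputed 20x20 weighting table (Gaussian-shaped, peaking at the
// patch center) applied to the Haar responses of the descriptor patch.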
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;
float centerY;
float win_offset;
float cos_dir;
float sin_dir;
int width;
int height;
};
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy);
__device__ void calc_dx_dy(const float* featureX, const float* featureY, const float* featureSize, const float* featureDir,
float& dx, float& dy)
{
__shared__ float s_PATCH[PATCH_SZ + 1][PATCH_SZ + 1];
dx = dy = 0.0f;
WinReader win;
win.centerX = featureX[blockIdx.x];
win.centerY = featureY[blockIdx.x];
// The sampling intervals and wavelet sizes for selecting an orientation
// and building the keypoint descriptor are defined relative to 's'
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
// Extract a window of pixels around the keypoint of size 20s
const int win_size = (int)((PATCH_SZ + 1) * s);
win.width = win.height = win_size;
// Nearest neighbour version (faster)
win.win_offset = -(win_size - 1.0f) / 2.0f;
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (::fabsf(descriptor_dir - 360.f) < numeric_limits<float>::epsilon())
descriptor_dir = 0.f;
descriptor_dir *= CV_PI_F / 180.0f;
sincosf(descriptor_dir, &win.sin_dir, &win.cos_dir);
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int xLoadInd = tid % (PATCH_SZ + 1);
const int yLoadInd = tid / (PATCH_SZ + 1);
if (yLoadInd < (PATCH_SZ + 1))
{
if (s > 1)
{
AreaFilter<WinReader> filter(win, s, s);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd, xLoadInd);
}
else
{
LinearFilter<WinReader> filter(win);
s_PATCH[yLoadInd][xLoadInd] = filter(yLoadInd * s, xLoadInd * s);
}
}
__syncthreads();
const int xPatchInd = threadIdx.x % 5;
const int yPatchInd = threadIdx.x / 5;
if (yPatchInd < 5)
{
const int xBlockInd = threadIdx.y % 4;
const int yBlockInd = threadIdx.y / 4;
const int xInd = xBlockInd * 5 + xPatchInd;
const int yInd = yBlockInd * 5 + yPatchInd;
const float dw = c_DW[yInd * PATCH_SZ + xInd];
dx = (s_PATCH[yInd ][xInd + 1] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd + 1][xInd ]) * dw;
dy = (s_PATCH[yInd + 1][xInd ] - s_PATCH[yInd][xInd] + s_PATCH[yInd + 1][xInd + 1] - s_PATCH[yInd ][xInd + 1]) * dw;
}
}
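// Note for the descriptor kernels below: each keypoint is processed by one 32x16 thread block.
// threadIdx.y picks one of the 16 (4x4) sub-regions of 5x5 samples in the 20x20 patch loaded
// by calc_dx_dy; the 25 weighted sample differences (dx, dy) of a sub-region are summed across
// the 32 threads of its shared-memory row, giving (sum dx, sum dy, sum |dx|, sum |dy|) per
// sub-region for the 64-float descriptor, or the dy/dx sign-split sums for the 128-float one.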
__global__ void compute_descriptors_64(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float dxabs = ::fabsf(dx);
float dyabs = ::fabsf(dy);
plus<float> op;
reduce<32>(sRow, dx, threadIdx.x, op);
reduce<32>(sRow, dy, threadIdx.x, op);
reduce<32>(sRow, dxabs, threadIdx.x, op);
reduce<32>(sRow, dyabs, threadIdx.x, op);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y;
// write dx, dy, |dx|, |dy|
if (threadIdx.x == 0)
*descriptors_block = make_float4(dx, dy, dxabs, dyabs);
}
__global__ void compute_descriptors_128(PtrStep<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float smem[32 * 16];
float* sRow = smem + threadIdx.y * 32;
float dx, dy;
calc_dx_dy(featureX, featureY, featureSize, featureDir, dx, dy);
float4* descriptors_block = descriptors.ptr(blockIdx.x) + threadIdx.y * 2;
plus<float> op;
float d1 = 0.0f;
float d2 = 0.0f;
float abs1 = 0.0f;
float abs2 = 0.0f;
if (dy >= 0)
{
d1 = dx;
abs1 = ::fabsf(dx);
}
else
{
d2 = dx;
abs2 = ::fabsf(dx);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (threadIdx.x == 0)
descriptors_block[0] = make_float4(d1, abs1, d2, abs2);
if (dx >= 0)
{
d1 = dy;
abs1 = ::fabsf(dy);
d2 = 0.0f;
abs2 = 0.0f;
}
else
{
d1 = 0.0f;
abs1 = 0.0f;
d2 = dy;
abs2 = ::fabsf(dy);
}
reduce<32>(sRow, d1, threadIdx.x, op);
reduce<32>(sRow, d2, threadIdx.x, op);
reduce<32>(sRow, abs1, threadIdx.x, op);
reduce<32>(sRow, abs2, threadIdx.x, op);
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (threadIdx.x == 0)
descriptors_block[1] = make_float4(d1, abs1, d2, abs2);
}
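// normalize_descriptors: one block per descriptor, with BLOCK_DIM_X (the descriptor length,
// 64 or 128) threads; the squared elements are reduced to the L2 norm and every element is
// then divided by it.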
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
__shared__ float smem[BLOCK_DIM_X];
__shared__ float s_len;
// one block per descriptor, one thread per descriptor element - no global thread id needed
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
const float val = descriptor_base[threadIdx.x];
float len = val * val;
reduce<BLOCK_DIM_X>(smem, len, threadIdx.x, plus<float>());
if (threadIdx.x == 0)
s_len = ::sqrtf(len);
__syncthreads();
// normalize and store in output
descriptor_base[threadIdx.x] = val / s_len;
}
void compute_descriptors_gpu(PtrStepSz<float4> descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
// compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
if (descriptors.cols == 64)
{
compute_descriptors_64<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<64><<<nFeatures, 64>>>((PtrStepSzf) descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
compute_descriptors_128<<<nFeatures, dim3(32, 16)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<128><<<nFeatures, 128>>>((PtrStepSzf) descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} // namespace surf
}}} // namespace cv { namespace gpu { namespace cudev
#endif /* CUDA_DISABLER */
|
9d59c4480bdc4ae6b05229a4858869abd2b84581.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputePhiMag_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
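// For each requested matrix size and every thread-block shape above, main() pads the problem
// size up to a multiple of the block dimensions, does one warm-up launch of ComputePhiMag_GPU,
// then 10 untimed and 1000 timed launches, and prints the elapsed time of the 1000-launch loop
// in microseconds together with the block and matrix dimensions.
// Assumed invocation (argv[1] is the number of matrix sizes to sweep, at most 7 here):
// ./benchmark 7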
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *phiR = NULL;
hipMalloc(&phiR, XSIZE*YSIZE);
float *phiI = NULL;
hipMalloc(&phiI, XSIZE*YSIZE);
float *phiMag = NULL;
hipMalloc(&phiMag, XSIZE*YSIZE);
int numK = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ComputePhiMag_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, phiR, phiI, phiMag, numK);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ComputePhiMag_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, phiR, phiI, phiMag, numK);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ComputePhiMag_GPU), dim3(gridBlock), dim3(threadBlock), 0, 0, phiR, phiI, phiMag, numK);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9d59c4480bdc4ae6b05229a4858869abd2b84581.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputePhiMag_GPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *phiR = NULL;
cudaMalloc(&phiR, XSIZE*YSIZE);
float *phiI = NULL;
cudaMalloc(&phiI, XSIZE*YSIZE);
float *phiMag = NULL;
cudaMalloc(&phiMag, XSIZE*YSIZE);
int numK = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputePhiMag_GPU<<<gridBlock,threadBlock>>>(phiR,phiI,phiMag,numK);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ComputePhiMag_GPU<<<gridBlock,threadBlock>>>(phiR,phiI,phiMag,numK);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ComputePhiMag_GPU<<<gridBlock,threadBlock>>>(phiR,phiI,phiMag,numK);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bc4d8242b6e6b79b1a2eed1187c5bd195c26c892.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 3> idata,
PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
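// area_pixel_compute_source_index maps output column w2 to a fractional source coordinate w1r;
// w1 and w1 + w1p are the two neighbouring source columns (w1p is 0 at the right edge) and
// w0lambda / w1lambda are their linear-interpolation weights.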
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 3> idata,
const PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
atomicAdd(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
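// Host wrapper below: one thread per output column (num_kernels = output_width), launched in
// blocks of maxThreadsPerBlock; the backward wrapper further down follows the same pattern and
// scatters gradients back with atomicAdd using the interpolation weights above.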
static void upsample_linear1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
upsample_1d_shape_check(
input, Tensor(), nbatch, channels, input_width, output_width);
output.resize_({input.size(0), input.size(1), output_width});
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 3>();
auto odata = output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_1);
hipLaunchKernelGGL(( upsample_linear1d_out_frame<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_linear1d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(
"upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 3,
"It is expected input_size equals to 3, but got size ",
input_size.size());
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
upsample_1d_shape_check(
Tensor(), grad_output_, nbatch, channels, input_width, output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_width});
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 3>();
auto odata = grad_output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_1);
hipLaunchKernelGGL(( upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>)
, dim3(cuda::ATenCeilDiv(num_kernels, num_threads)),
dim3(num_threads),
0,
stream, num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_linear1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners, scales_1);
return output;
}
Tensor upsample_linear1d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners, scales_1);
return output;
}
Tensor& upsample_linear1d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_1);
return grad_input;
}
Tensor upsample_linear1d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_1);
return grad_input;
}
} // namespace native
} // namespace at
| bc4d8242b6e6b79b1a2eed1187c5bd195c26c892.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame(
const int n,
const accscalar_t rwidth,
const bool align_corners,
const PackedTensorAccessor64<scalar_t, 3> idata,
PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = idata[n][c][w1];
odata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const accscalar_t val =
w0lambda * idata[n][c][w1] + w1lambda * idata[n][c][w1 + w1p];
odata[n][c][w2] = static_cast<scalar_t>(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename scalar_t, typename accscalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void upsample_linear1d_out_frame_backward(
const int n,
const accscalar_t rwidth,
const bool align_corners,
PackedTensorAccessor64<scalar_t, 3> idata,
const PackedTensorAccessor64<scalar_t, 3> odata) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = idata.size(0);
const int channels = idata.size(1);
const int width1 = idata.size(2);
const int width2 = odata.size(2);
if (index < n) {
const int w2 = index % width2;
// special case: just copy
if (width1 == width2) {
const int w1 = w2;
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t val = odata[n][c][w1];
idata[n][c][w2] = val;
}
}
return;
}
//
const accscalar_t w1r = area_pixel_compute_source_index<accscalar_t>(
rwidth, w2, align_corners, /*cubic=*/false);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const accscalar_t w1lambda = w1r - w1;
const accscalar_t w0lambda = static_cast<accscalar_t>(1) - w1lambda;
//
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const scalar_t d2val = odata[n][c][w2];
atomicAdd(&idata[n][c][w1], static_cast<scalar_t>(w0lambda * d2val));
atomicAdd(
&idata[n][c][w1 + w1p], static_cast<scalar_t>(w1lambda * d2val));
}
}
}
}
static void upsample_linear1d_out_cuda_template(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
int output_width = output_size[0];
int nbatch = input.size(0);
int channels = input.size(1);
int input_width = input.size(2);
upsample_1d_shape_check(
input, Tensor(), nbatch, channels, input_width, output_width);
output.resize_({input.size(0), input.size(1), output_width});
output.zero_();
AT_ASSERT(input_width > 0 && output_width > 0);
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_linear1d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.packed_accessor64<scalar_t, 3>();
auto odata = output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_1);
upsample_linear1d_out_frame<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_linear1d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
TensorArg grad_output_arg{grad_output_, "grad_output_", 1},
grad_input_arg{grad_input, "grad_input", 2};
checkAllSameGPU(
"upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 1,
"It is expected output_size equals to 1, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 3,
"It is expected input_size equals to 3, but got size ",
input_size.size());
int output_width = output_size[0];
int nbatch = input_size[0];
int channels = input_size[1];
int input_width = input_size[2];
upsample_1d_shape_check(
Tensor(), grad_output_, nbatch, channels, input_width, output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_({nbatch, channels, input_width});
grad_input.zero_();
const int num_kernels = output_width;
const int num_threads =
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_linear1d_out_frame_backward", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.packed_accessor64<scalar_t, 3>();
auto odata = grad_output.packed_accessor64<scalar_t, 3>();
const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>(
input_width, output_width, align_corners, scales_1);
upsample_linear1d_out_frame_backward<scalar_t, accscalar_t>
<<<cuda::ATenCeilDiv(num_kernels, num_threads),
num_threads,
0,
stream>>>(num_kernels, rwidth, align_corners, idata, odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_linear1d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners, scales_1);
return output;
}
Tensor upsample_linear1d_cuda(
const Tensor& input,
IntArrayRef output_size,
bool align_corners,
double scales_1) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_linear1d_out_cuda_template(
output, input, output_size, align_corners, scales_1);
return output;
}
Tensor& upsample_linear1d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_1);
return grad_input;
}
Tensor upsample_linear1d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size,
bool align_corners,
double scales_1) {
Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
upsample_linear1d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size, align_corners, scales_1);
return grad_input;
}
} // namespace native
} // namespace at
|
d4ba22008aa6fe6de3b01ebd0847d7f1a85bfb6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* FDAS host functions */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <math.h>
#include "headers/fdas_host.h"
#include "headers/params.h"
#include "headers/fdas_test_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <hiprand/hiprand.h>
#include <libgen.h>
//#include <random> // C++11 to use normal distribution
void fdas_print_params_h()
{
printf("\n\nParameters defined in params.h:\n\t-------------------\n");
// printf("\nSampling time: TSAMP %g\n", TSAMP);
printf("\nSpeed of light: SLIGHT %g\n", SLIGHT);
printf("\nTemplate length for FFT: KERNLEN = RADIX*POTWO %d\n", KERNLEN);
printf("\nAcceleration step in fourier bins (z): ACCEL_STEP %f\n", ACCEL_STEP);
printf("\nAcceleration step in fourier bins (z) reciprocal: ACCEL_STEP_R %f\n", ACCEL_STEP_R);
printf("\nMaximum acceleration in fourier bins (z): ZMAX %d\n", ZMAX);
printf("\nNumber of templates including zero acceleration: NKERN %d\n", NKERN);
// printf("\nLowest acceleration in fourier bins (z) (for harmonic sum): ZLO %d\n", ZLO);
printf("\nThread block size in x direction for 2-D thread block convolution GPU kernels : TBSIZEX %d\n", TBSIZEX);
printf("\nThread block size in Y direction for 2-D thread block convolution GPU kernels : TBSIZEY %d\n", TBSIZEY);
printf("\nThread block size in x direction for 2-D thread block power spectrum GPU kernels : PTBSIZEX %d\n", PTBSIZEX);
printf("\nThread block size in y direction for 2-D thread block power spectrum GPU kernels : PTBSIZEY %d\n", PTBSIZEY);
printf("\n\nCustom FFT specific parameters:\n\t------------------\n" );
printf("\nTAPS \t%d\n", TAPS);
printf("\n\n\t--------------\n\n");
}
void fdas_cuda_check_devices(int devid)
{
//int dev = 0;
int devcount;
//hipDeviceProp_t deviceProp;
/* ******* Detect CUDA devices ******* */
checkCudaErrors(hipGetDeviceCount(&devcount));
printf("\nDetected %d CUDA Capable device(s)\n", devcount);
/*
for (dev = 0; dev < devcount; ++dev)
{
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
}
if (devid<devcount){
printf("\nsetting device %d (default)\n", devid);
hipSetDevice(devid);
}
else{
printf("\nDevice %d not found, setting device 0 (default)\n", devid);
hipSetDevice(0);
}
*/
// hipSetDevice(CARD);
}
void fdas_alloc_gpu_arrays(fdas_gpuarrays *arrays, cmd_args *cmdargs)
{
printf("\nAllocating gpu arrays:\n");
if (cmdargs->inbin){
printf("\nF-fdot array will be interbinned\n");
}
double gbyte = 1024.0*1024.0*1024.0;
//double mbyte = 1024.0*1024.0;
// Memory allocations for gpu real fft input / output signal
checkCudaErrors(hipMalloc((void**)&arrays->d_in_signal, arrays->mem_insig));
checkCudaErrors(hipMalloc((void**)&arrays->d_fft_signal, arrays->mem_rfft));
//Allocating arrays for fourier domain convolution
checkCudaErrors(hipMalloc((void**)&arrays->d_ext_data, arrays->mem_extsig));
//templates
checkCudaErrors(hipMalloc((void**)&arrays->d_kernel, KERNLEN*sizeof(float2)*NKERN ));
//ffdot planes
checkCudaErrors(hipMalloc((void**)&arrays->d_ffdot_pwr, arrays->mem_ffdot ));
//initialise array
checkCudaErrors(hipMemset(arrays->d_ffdot_pwr, 0, arrays->mem_ffdot));
printf("ffdot x size: %zu",arrays->mem_ffdot/sizeof(float)/NKERN);
if(cmdargs->basic==1){
checkCudaErrors(hipMalloc(&arrays->d_ffdot_cpx, arrays->mem_ffdot_cpx));
}
if(cmdargs->kfft && cmdargs->inbin){
// printf("mem_ipedge = %u ",mem_ipedge/);
checkCudaErrors(hipMalloc(&arrays->ip_edge_points, arrays->mem_ipedge));
}
// Added by KA
if ( hipSuccess != hipMalloc((void**) &arrays->d_fdas_peak_list, arrays->mem_max_list_size)) printf("Allocation error in FDAS: d_fdas_peak_list\n");
// check allocated/free memory
size_t mfree, mtotal;
checkCudaErrors(hipMemGetInfo ( &mfree, &mtotal ));
printf("\nMemory allocation finished: Total memory for this device: %.2f GB\nAvailable memory left on this device: %.2f GB \n", mtotal/gbyte, mfree/gbyte);
}
void fdas_free_gpu_arrays(fdas_gpuarrays *arrays, cmd_args *cmdargs)
{
checkCudaErrors(hipFree(arrays->d_in_signal));
checkCudaErrors(hipFree(arrays->d_fft_signal));
checkCudaErrors(hipFree(arrays->d_ext_data));
checkCudaErrors(hipFree(arrays->d_ffdot_pwr));
checkCudaErrors(hipFree(arrays->d_kernel));
if(cmdargs->basic)
checkCudaErrors(hipFree(arrays->d_ffdot_cpx));
if(cmdargs->kfft && cmdargs->inbin)
checkCudaErrors(hipFree(arrays->ip_edge_points));
// Added by KA
hipFree(arrays->d_fdas_peak_list);
}
/*
void fdas_create_acc_sig(fdas_new_acc_sig *acc_sig, cmd_args *cmdargs)
/* Create accelerated signal with given parameters in a float array */
/*
{
double t0, tau;
double omega = 2*M_PI*acc_sig->freq0;
double accel;
double tobs;
// gaussian distribution from C++ <random>
std::default_random_engine rgen;
std::normal_distribution<float> gdist(0.0,cmdargs->nsig);
tobs = (double) (TSAMP*acc_sig->nsamps);
accel = ((double)acc_sig->zval * SLIGHT) / (acc_sig->freq0*tobs*tobs);
printf("\n\npreparing test signal, observation time = %f s, %d nsamps f0 = %f Hz with %d harmonics\n", tobs, acc_sig->nsamps, acc_sig->freq0, acc_sig->nharms);
printf("\nz = %d acceleration = %f m/s^2\n", acc_sig->zval, accel);
acc_sig->acc_signal = (float*)malloc(acc_sig->nsamps*sizeof(float));
printf("\nNow creating accelerated signal with fc=%f, accel=%f, harmonics=%d, duty cycle=%.1f%%, noise=%d signal samples=%d, signal level: %.2f\n", acc_sig->freq0, accel, acc_sig->nharms, acc_sig->duty*100.0, cmdargs->nsig, acc_sig->nsamps,acc_sig->sigamp);
for ( int i=0; i<acc_sig->nsamps; ++i){
t0 = i*TSAMP;
tau = t0 + (t0*(accel*t0) / SLIGHT /2.0);
if (cmdargs->nsig!=0){
acc_sig->acc_signal[i] = gdist(rgen);
}
for (int j = 1; j <= acc_sig->nharms; ++j){
acc_sig->acc_signal[i] += (2.0/(j*M_PI)*sin(j*M_PI*acc_sig->duty))*acc_sig->sigamp*cos(j*omega*tau);
}
}
//Write to file
char afname[200];
sprintf(afname, "data/acc_sig_8192x%d_%dharms_%dduty_%.3fHz_%dz_%dnsigma.dat", acc_sig->mul, acc_sig->nharms, (int)( acc_sig->duty*100.0), acc_sig->freq0, acc_sig->zval, acc_sig->nsig );
write_output_file(afname, &acc_sig->acc_signal, acc_sig->nsamps );
free(acc_sig->acc_signal);
}
*/
void fdas_create_acc_kernels(hipfftComplex* d_kernel, cmd_args *cmdargs )
{
/* Create kernel templates for the correlation technique (Ransom et al. 2002), */
/* and upload + FFT them to GPU memory. */
/* Uses functions from the original PRESTO accelsearch code */
/* (small adaptations to variable names; the usual interpolation handling is removed */
/* because the input signal is already interpolated). */
/* - credit to Scott Ransom */
int ii;
int inbin = 1;
hipfftComplex *h_kernel, *tempkern;
hipfftHandle templates_plan; // for host kernel fft
int nrank = 1;
int n[] = {KERNLEN};
int idist = n[0], odist =n[0];
int *inembed = n, *onembed = n;
int istride =1, ostride = 1;
//allocate kernel array and prepare fft
h_kernel = (hipfftComplex*) malloc(NKERN*KERNLEN*sizeof(float2));
// batched fft plan for the templates array
hipfftPlanMany( &templates_plan, nrank, n, inembed , istride,
idist, onembed, ostride,
odist, HIPFFT_C2C, NKERN);
for (ii = 0; ii < NKERN; ii++){
double z = (-ZMAX+ii*ACCEL_STEP);
int halfwidth = presto_z_resp_halfwidth(z, LOWACC) ;
int numkern = 2 * halfwidth * inbin;
tempkern = presto_gen_z_response( z, numkern, inbin);
presto_place_complex_kernel(tempkern, numkern, (h_kernel+ii*KERNLEN), KERNLEN);
free(tempkern);
}
//!TEST!: replace templates here. Template width: numkern; padded width: KERNLEN
#ifdef FDAS_CONV_TEST
for (ii = 0; ii < NKERN; ii++){
int boxcar_width=ii*FDAS_TEST_FILTER_INCREMENT;
for(int f=0; f<KERNLEN; f++){
h_kernel[ii*KERNLEN + f].x = 0;
h_kernel[ii*KERNLEN + f].y = 0;
if(f<boxcar_width/2) h_kernel[ii*KERNLEN + f].x = 1.0;
if(f>=(KERNLEN-boxcar_width/2)) h_kernel[ii*KERNLEN + f].x = 1.0;
}
}
#endif
//!TEST!: replace templates here. Template width: numkern; padded width: KERNLEN
checkCudaErrors( hipMemcpy( d_kernel, h_kernel, KERNLEN*sizeof(float2)* NKERN, hipMemcpyHostToDevice) ); // upload kernels to GPU
#ifndef NOCUST
//use kerel's non-reordered fft
if (cmdargs->kfft)
hipLaunchKernelGGL(( customfft_fwd_temps_no_reorder), dim3(NKERN),dim3(KERNLEN), 0, 0, d_kernel);
#endif
//use cuFFT to transform the templates
if (cmdargs->basic)
hipfftExecC2C(templates_plan, d_kernel, d_kernel, HIPFFT_FORWARD);
free(h_kernel);
}
void fdas_cuda_create_fftplans(fdas_cufftplan *fftplans, fdas_params *params) {
/*check plan memory overhead and create plans */
double mbyte = 1024.0*1024.0;
//double gbyte = mbyte*1024.0;
//set cufft plan parameters
size_t sig_worksize, real_worksize;
int nrank = 1;
int n[] = {KERNLEN};
int idist = n[0], odist =n[0];
int *inembed = n, *onembed = n;
int istride =1, ostride = 1;
//estimate plan memory for real fft
checkCudaErrors(hipfftEstimate1d( params->nsamps, HIPFFT_R2C, 1, &real_worksize));
printf("\nsignal real fft plan requires extra %f MB of memory\n", real_worksize / mbyte);
//estimate plan memory for forward fft
checkCudaErrors(hipfftEstimateMany(nrank, n,inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, params->nblocks, &sig_worksize));
printf("\nsignal forward fft plan requires extra %f MB of memory\n the same plan is used for the inverse fft", sig_worksize / mbyte);
// real plan
size_t rworksize;
int rn[] = {params->nsamps};
int *rinembed = rn, *ronembed = rn;
int ridist = rn[0], rodist = params->rfftlen;
hipfftCreate(&fftplans->realplan);
checkCudaErrors(hipfftMakePlanMany( fftplans->realplan, nrank, rn, rinembed, istride, ridist, ronembed, ostride, rodist, HIPFFT_R2C, 1, &rworksize));
hipDeviceSynchronize();
getLastCudaError("\nCuda Error real fft plan\n");
// forward batched plan - same used for inverse
checkCudaErrors(hipfftCreate(&fftplans->forwardplan));
checkCudaErrors(hipfftMakePlanMany( fftplans->forwardplan, nrank, n, inembed, istride, idist, onembed, ostride, odist, HIPFFT_C2C, params->nblocks, &sig_worksize));
hipDeviceSynchronize();
getLastCudaError("\nCuda Error forward fft plan\n");
printf("\ncuFFT plans done \n");
}
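/* fdas_cuda_basic() below implements the correlation search for one DM trial:
real FFT of the input -> optional PRESTO dereddening -> overlap-copy into nblocks segments
of KERNLEN complex samples -> optional per-block median normalisation -> batched forward
FFT -> complex multiplication with each of the NKERN pre-FFT'd acceleration templates ->
inverse FFTs -> power spectra (optionally interbinned) concatenated into the f-fdot plane.
The sigblock/offset parameters suggest an overlap-save scheme in which only the central
sigblock samples of each segment are kept. */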
void fdas_cuda_basic(fdas_cufftplan *fftplans, fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params)
{
/* Basic GPU fdas algorithm using cuFFT */
//int inbin;
int cthreads = TBSIZEX;
int cblocks = KERNLEN/TBSIZEX;
dim3 pwthreads(PTBSIZEX, PTBSIZEY);
dim3 pwblocks((params->sigblock / PTBSIZEX) + 1, NKERN/PTBSIZEY);
/* if (cmdargs->inbin)
inbin = 2;
else
inbin = 1;
*/
//real fft
#ifndef FDAS_CONV_TEST
hipfftExecR2C(fftplans->realplan, gpuarrays->d_in_signal, gpuarrays->d_fft_signal);
#endif
#ifdef FDAS_CONV_TEST
float2 *f2temp;
float *ftemp;
ftemp = (float *)malloc(params->rfftlen*sizeof(float));
f2temp = (float2 *)malloc(params->rfftlen*sizeof(float2));
checkCudaErrors( hipMemcpy(ftemp, gpuarrays->d_in_signal, (params->rfftlen)*sizeof(float), hipMemcpyDeviceToHost));
for(int f=0; f<params->rfftlen; f++){
f2temp[f].x = ftemp[f];
f2temp[f].y = 0;
}
checkCudaErrors( hipMemcpy(gpuarrays->d_fft_signal, f2temp, (params->rfftlen)*sizeof(float2), hipMemcpyHostToDevice));
free(ftemp);
free(f2temp);
#endif
if (cmdargs->norm){
// PRESTO deredden - remove red noise.
// TODO: replace with GPU version
float2 *fftsig;
fftsig = (float2*)malloc((params->rfftlen)*sizeof(float2));
checkCudaErrors( hipMemcpy(fftsig, gpuarrays->d_fft_signal, (params->rfftlen)*sizeof(float2), hipMemcpyDeviceToHost));
presto_dered_sig(fftsig, params->rfftlen);
checkCudaErrors( hipMemcpy(gpuarrays->d_fft_signal, fftsig, (params->rfftlen)*sizeof(float2), hipMemcpyHostToDevice));
free(fftsig);
}
//overlap-copy
hipLaunchKernelGGL(( cuda_overlap_copy), dim3(KERNLEN/64), dim3(64) , 0, 0, gpuarrays->d_ext_data, gpuarrays->d_fft_signal, params->sigblock, params->rfftlen, params->extlen, params->offset, params->nblocks );
if (cmdargs->norm){
// PRESTO block median normalization
// TODO: replace with GPU version
float2 *extsig;
extsig = (float2*)malloc((params->extlen)*sizeof(float2));
checkCudaErrors( hipMemcpy(extsig, gpuarrays->d_ext_data, (params->extlen)*sizeof(float2), hipMemcpyDeviceToHost));
for(int b=0; b<params->nblocks; ++b)
presto_norm(extsig+b*KERNLEN, KERNLEN);
checkCudaErrors( hipMemcpy(gpuarrays->d_ext_data, extsig, (params->extlen)*sizeof(float2), hipMemcpyHostToDevice));
free(extsig);
}
//complex block fft
hipfftExecC2C(fftplans->forwardplan, gpuarrays->d_ext_data, gpuarrays->d_ext_data, HIPFFT_FORWARD);
//complex multiplication kernel
hipLaunchKernelGGL(( cuda_convolve_reg_1d_halftemps), dim3(cblocks), dim3(cthreads) , 0, 0, gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_cpx, params->extlen, params->scale);
//inverse fft
for (int k=0; k < ZMAX/2; k++){
hipfftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + k * params->extlen, gpuarrays->d_ffdot_cpx + k *params->extlen, HIPFFT_BACKWARD);
hipfftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + (ZMAX-k) * params->extlen, gpuarrays->d_ffdot_cpx + (ZMAX-k) *params->extlen, HIPFFT_BACKWARD);
}
// z=0
hipfftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + ((ZMAX/2) * params->extlen), gpuarrays->d_ffdot_cpx + ((ZMAX/2) * params->extlen), HIPFFT_BACKWARD);
//power spectrum
if (cmdargs->inbin){
hipLaunchKernelGGL(( cuda_ffdotpow_concat_2d_inbin), dim3(pwblocks), dim3(pwthreads) , 0, 0, gpuarrays->d_ffdot_cpx, gpuarrays->d_ffdot_pwr, params->sigblock, params->offset, params->nblocks, params->extlen, params->siglen);
}
else{
hipLaunchKernelGGL(( cuda_ffdotpow_concat_2d) , dim3(pwblocks), dim3(pwthreads) , 0, 0, gpuarrays->d_ffdot_cpx, gpuarrays->d_ffdot_pwr, params->sigblock, params->offset, params->nblocks, params->extlen, params->siglen);
}
}
#ifndef NOCUST
void fdas_cuda_customfft(fdas_cufftplan *fftplans, fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params) {
//int nthreads;
dim3 cblocks(params->nblocks, NKERN/2);
//real fft
#ifndef FDAS_CONV_TEST
hipfftExecR2C(fftplans->realplan, gpuarrays->d_in_signal, gpuarrays->d_fft_signal);
#endif
#ifdef FDAS_CONV_TEST
float2 *f2temp;
float *ftemp;
ftemp = (float *)malloc(params->rfftlen*sizeof(float));
f2temp = (float2 *)malloc(params->rfftlen*sizeof(float2));
checkCudaErrors( hipMemcpy(ftemp, gpuarrays->d_in_signal, (params->rfftlen)*sizeof(float), hipMemcpyDeviceToHost));
for(int f=0; f<params->rfftlen; f++){
f2temp[f].x = ftemp[f];
f2temp[f].y = 0;
}
checkCudaErrors( hipMemcpy(gpuarrays->d_fft_signal, f2temp, (params->rfftlen)*sizeof(float2), hipMemcpyHostToDevice));
free(ftemp);
free(f2temp);
#endif
if (cmdargs->norm){
// PRESTO deredden - remove red noise.
// TODO: replace with GPU version
float2 *fftsig;
fftsig = (float2*)malloc((params->rfftlen)*sizeof(float2));
checkCudaErrors( hipMemcpy(fftsig, gpuarrays->d_fft_signal, (params->rfftlen)*sizeof(float2), hipMemcpyDeviceToHost));
presto_dered_sig(fftsig, params->rfftlen);
checkCudaErrors( hipMemcpy(gpuarrays->d_fft_signal, fftsig, (params->rfftlen)*sizeof(float2), hipMemcpyHostToDevice));
free(fftsig);
}
//overlap-copy
hipLaunchKernelGGL(( cuda_overlap_copy_smallblk), dim3(params->nblocks), dim3(KERNLEN) , 0, 0, gpuarrays->d_ext_data, gpuarrays->d_fft_signal, params->sigblock, params->rfftlen, params->extlen, params->offset, params->nblocks );
if (cmdargs->norm){
// PRESTO block median normalization
// TODO: replace with GPU version
float2 *extsig;
extsig = (float2*)malloc((params->extlen)*sizeof(float2));
checkCudaErrors( hipMemcpy(extsig, gpuarrays->d_ext_data, (params->extlen)*sizeof(float2), hipMemcpyDeviceToHost));
for(int b=0; b<params->nblocks; ++b)
presto_norm(extsig+b*KERNLEN, KERNLEN);
checkCudaErrors( hipMemcpy(gpuarrays->d_ext_data, extsig, (params->extlen)*sizeof(float2), hipMemcpyHostToDevice));
free(extsig);
}
// Custom FFT convolution kernel
if(cmdargs->inbin){
hipLaunchKernelGGL(( cuda_convolve_customfft_wes_no_reorder02_inbin), dim3(params->nblocks), dim3(KERNLEN) , 0, 0, gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, params->sigblock, params->extlen, params->siglen, params->offset, params->scale, gpuarrays->ip_edge_points);
}
else{
//cuda_convolve_customfft_wes_no_reorder02<<< params->nblocks, KERNLEN >>>( gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, params->sigblock, params->extlen, params->siglen, params->offset, params->scale);
//-------------------------------------------
dim3 gridSize(1, 1, 1);
dim3 blockSize(1, 1, 1);
/*
//-------------------------------------------
//Two elements per thread
gridSize.x = params->nblocks;
gridSize.y = 1;
gridSize.z = 1;
blockSize.x = KERNLEN/2;
GPU_CONV_kFFT_mk11_2elem_2v<<<gridSize,blockSize>>>(gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, gpuarrays->d_kernel, params->sigblock, params->offset, params->nblocks, params->scale);
*/
//-------------------------------------------
//Four elements per thread
gridSize.x = params->nblocks;
gridSize.y = 1;
gridSize.z = 1;
blockSize.x = KERNLEN/4;
hipLaunchKernelGGL(( GPU_CONV_kFFT_mk11_4elem_2v), dim3(gridSize),dim3(blockSize), 0, 0, gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, gpuarrays->d_kernel, params->sigblock, params->offset, params->nblocks, params->scale);
}
}
#endif
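/* fdas_write_list() converts each detected peak (template index, frequency bin, power) into
an acceleration in z-bins and in m/s^2, a frequency in Hz, and an SNR derived from the
supplied MSD mean/stddev, and writes them to acc_list_<dm>.dat. */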
void fdas_write_list(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float *h_MSD, float dm_low, int dm_count, float dm_step, unsigned int list_size){
int ibin=1;
if (cmdargs->inbin) ibin=2;
double tobs = (double)params->tsamp* (double)params->nsamps*ibin;
if( !isnan(h_MSD[0]) && !isinf(h_MSD[0]) && !isnan(h_MSD[1]) && !isinf(h_MSD[1]) ){ // proceed only if mean and stddev are both finite
printf("Number of peaks:%d; mean:%f; stddev:%f\n", list_size, h_MSD[0], h_MSD[1]);
float *h_fdas_peak_list = (float*)malloc(list_size*4*sizeof(float));
checkCudaErrors(hipMemcpy(h_fdas_peak_list, gpuarrays->d_fdas_peak_list, list_size*4*sizeof(float), hipMemcpyDeviceToHost));
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
sprintf(pfname, "acc_list_%f.dat", dm_low + ((float)dm_count)*dm_step);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
}
for(int f=0; f<list_size; f++){
int j;
double a, acc, acc1, jfreq, pow, SNR;
a = h_fdas_peak_list[4*f];
j = (int) h_fdas_peak_list[4*f + 1];
pow = h_fdas_peak_list[4*f + 2];
SNR = (pow-h_MSD[0])/h_MSD[1];
jfreq = (double)(j) / tobs;
acc = (double) (ZMAX - a* ACCEL_STEP);
acc1 = acc*SLIGHT / jfreq / tobs / tobs;
fprintf(fp_c, "%.2f\t%.3f\t%u\t%.3f\t%.3f\t%.3f\n", acc, acc1, j , jfreq, pow, SNR);
}
fclose(fp_c);
free(h_fdas_peak_list);
}
else {
printf("Error: mean or standard deviation was NaN or Inf!\n");
}
}
void fdas_write_ffdot(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float dm_low, int dm_count, float dm_step ) {
int ibin=1;
if (cmdargs->inbin)
ibin=2;
// Download, threshold and write ffdot data to file
//int nsamps = params->nsamps;
printf("\n\nWrite data for signal with %d samples\nf-fdot size=%u\n",params->nsamps, params->ffdotlen);
float *h_ffdotpwr = (float*)malloc(params->ffdotlen* sizeof(float));
//download data
checkCudaErrors(hipMemcpy(h_ffdotpwr, gpuarrays->d_ffdot_pwr, params->ffdotlen*sizeof(float), hipMemcpyDeviceToHost));
// calculating statistics
double total = 0.0;
double mean;
double stddev;
// unsigned int j;
for ( int j = 0; j < params->ffdotlen; ++j){
total += (double)(h_ffdotpwr[j]);
if(isnan(total)){
printf("\nnan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
mean = total / ((double)(params->ffdotlen));
printf("\ntotal ffdot:%lf\tmean ffdot: %lf", total, mean);
// Calculate standard deviation
total = 0.0;
for ( int j = 0; j < params->ffdotlen; ++j){
total += ((double)h_ffdotpwr[j] - mean ) * ((double)h_ffdotpwr[j] - mean);
if(isnan(total)||isinf(total)){
printf("\ninf/nan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
stddev = sqrt(abs(total) / (double)(params->ffdotlen - 1));
printf("\nmean ffdot: %f\tstd ffdot: %lf\n", mean, stddev);
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
// char *infilename;
// infilename = basename(cmdargs->afname);
// filename needs to be acc_dm_%f, dm_low[i] + ((float)dm_count)*dm_step[i]
//sprintf(pfname, "%s/out_inbin%d_%s",dirname,ibin,infilename);
sprintf(pfname, "acc_%f.dat", dm_low + ((float)dm_count)*dm_step);
printf("\nwriting results to file %s\n",pfname);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
exit(1);
}
float pow, sigma;
double tobs = (double)params->tsamp * (double)params->nsamps*ibin;
unsigned int numindep = params->siglen*(NKERN+1)*ACCEL_STEP/6.95; // taken from PRESTO
//write to file
printf("\nWriting ffdot data to file...\n");
for(int a = 0; a < NKERN; a++) {
double acc = (double) (ZMAX - a* ACCEL_STEP);
for( int j = 0; j < ibin*params->siglen; j++){
pow = h_ffdotpwr[a * ibin*params->siglen + j]; //(h_ffdotpwr[a * params->siglen + j]-mean)/stddev;
if( pow > cmdargs->thresh) {
sigma = candidate_sigma(pow, cmdargs->nharms, numindep); // power, number of harmonics, number of independent searches=1...2^harms
// sigma=1.0;
double jfreq = (double)(j) / tobs;
double acc1 = acc*SLIGHT / jfreq / tobs / tobs;
fprintf(fp_c, "%.2f\t%.3f\t%u\t%.3f\t%.3f\t%.3f\n", acc, acc1, j , jfreq, pow, sigma);
}
}
}
fclose(fp_c);
printf("\nFinished writing file %s\n",pfname);
free(h_ffdotpwr);
}
void fdas_write_test_ffdot(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float dm_low, int dm_count, float dm_step ) {
int ibin=1;
if (cmdargs->inbin)
ibin=2;
/* Download, threshold and write ffdot data to file */
//int nsamps = params->nsamps;
printf("\n\nWrite data for signal with %d samples\nf-fdot size=%u\n",params->nsamps, params->ffdotlen);
float *h_ffdotpwr = (float*)malloc(params->ffdotlen* sizeof(float));
//download data
checkCudaErrors(hipMemcpy(h_ffdotpwr, gpuarrays->d_ffdot_pwr, params->ffdotlen*sizeof(float), hipMemcpyDeviceToHost));
// calculating statistics
double total = 0.0;
double mean;
double stddev;
// unsigned int j;
for ( int j = 0; j < params->ffdotlen; ++j){
total += (double)(h_ffdotpwr[j]);
if(isnan(total)){
printf("\nnan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
mean = total / ((double)(params->ffdotlen));
printf("\ntotal ffdot:%lf\tmean ffdot: %lf", total, mean);
// Calculate standard deviation
total = 0.0;
for ( int j = 0; j < params->ffdotlen; ++j){
total += ((double)h_ffdotpwr[j] - mean ) * ((double)h_ffdotpwr[j] - mean);
if(isnan(total)||isinf(total)){
printf("\ninf/nan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
stddev = sqrt(abs(total) / (double)(params->ffdotlen - 1));
printf("\nmean ffdot: %f\tstd ffdot: %lf\n", mean, stddev);
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
sprintf(pfname, "acc_fdas_conv_test.dat");
printf("\nwriting results to file %s\n",pfname);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
exit(1);
}
float pow;
//write to file
printf("\nWriting ffdot data to file...\n");
for(int a = 0; a < NKERN; a++) {
for( int j = 0; j < ibin*params->siglen; j++){
pow = h_ffdotpwr[a * ibin*params->siglen + j]; //(h_ffdotpwr[a * params->siglen + j]-mean)/stddev;
fprintf(fp_c, "%u\t%u\t%f\n", a, j, pow);
}
}
fclose(fp_c);
printf("\nFinished writing file %s\n",pfname);
free(h_ffdotpwr);
}
| d4ba22008aa6fe6de3b01ebd0847d7f1a85bfb6b.cu | /* FDAS host functions */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <math.h>
#include "headers/fdas_host.h"
#include "headers/params.h"
#include "headers/fdas_test_parameters.h"
//#include <helper_functions.h>
#include <helper_cuda.h>
#include <curand.h>
#include <libgen.h>
//#include <random> // C++11 to use normal distribution
void fdas_print_params_h()
{
printf("\n\nParameters defined in params.h:\n\t-------------------\n");
// printf("\nSampling time: TSAMP %g\n", TSAMP);
printf("\nSpeed of light: SLIGHT %g\n", SLIGHT);
printf("\nTemplate length for FFT: KERNLEN = RADIX*POTWO %d\n", KERNLEN);
printf("\nAcceleration step in fourier bins (z): ACCEL_STEP %f\n", ACCEL_STEP);
printf("\nAcceleration step in fourier bins (z) reciprocal: ACCEL_STEP_R %f\n", ACCEL_STEP_R);
printf("\nMaximum acceleration in fourier bins (z): ZMAX %d\n", ZMAX);
printf("\nNumber of templates including zero acceleration: NKERN %d\n", NKERN);
// printf("\nLowest acceleration in fourier bins (z) (for harmonic sum): ZLO %d\n", ZLO);
printf("\nThread block size in x direction for 2-D thread block convolution GPU kernels : TBSIZEX %d\n", TBSIZEX);
printf("\nThread block size in Y direction for 2-D thread block convolution GPU kernels : TBSIZEY %d\n", TBSIZEY);
printf("\nThread block size in x direction for 2-D thread block power spectrum GPU kernels : PTBSIZEX %d\n", PTBSIZEX);
printf("\nThread block size in y direction for 2-D thread block power spectrum GPU kernels : PTBSIZEY %d\n", PTBSIZEY);
printf("\n\nCustom FFT specific parameters:\n\t------------------\n" );
printf("\nTAPS \t%d\n", TAPS);
printf("\n\n\t--------------\n\n");
}
void fdas_cuda_check_devices(int devid)
{
//int dev = 0;
int devcount;
//cudaDeviceProp deviceProp;
/* ******* Detect CUDA devices ******* */
checkCudaErrors(cudaGetDeviceCount(&devcount));
printf("\nDetected %d CUDA Capable device(s)\n", devcount);
/*
for (dev = 0; dev < devcount; ++dev)
{
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
}
if (devid<devcount){
printf("\nsetting device %d (default)\n", devid);
cudaSetDevice(devid);
}
else{
printf("\nDevice %d not found, setting device 0 (default)\n", devid);
cudaSetDevice(0);
}
*/
// cudaSetDevice(CARD);
}
void fdas_alloc_gpu_arrays(fdas_gpuarrays *arrays, cmd_args *cmdargs)
{
printf("\nAllocating gpu arrays:\n");
if (cmdargs->inbin){
printf("\nF-fdot array will be interbinned\n");
}
double gbyte = 1024.0*1024.0*1024.0;
//double mbyte = 1024.0*1024.0;
// Memory allocations for gpu real fft input / output signal
checkCudaErrors(cudaMalloc((void**)&arrays->d_in_signal, arrays->mem_insig));
checkCudaErrors(cudaMalloc((void**)&arrays->d_fft_signal, arrays->mem_rfft));
//Allocating arrays for fourier domain convolution
checkCudaErrors(cudaMalloc((void**)&arrays->d_ext_data, arrays->mem_extsig));
//templates
checkCudaErrors(cudaMalloc((void**)&arrays->d_kernel, KERNLEN*sizeof(float2)*NKERN ));
//ffdot planes
checkCudaErrors(cudaMalloc((void**)&arrays->d_ffdot_pwr, arrays->mem_ffdot ));
//initialise array
checkCudaErrors(cudaMemset(arrays->d_ffdot_pwr, 0, arrays->mem_ffdot));
printf("ffdot x size: %zu",arrays->mem_ffdot/sizeof(float)/NKERN);
if(cmdargs->basic==1){
checkCudaErrors(cudaMalloc(&arrays->d_ffdot_cpx, arrays->mem_ffdot_cpx));
}
if(cmdargs->kfft && cmdargs->inbin){
// printf("mem_ipedge = %u ",mem_ipedge/);
checkCudaErrors(cudaMalloc(&arrays->ip_edge_points, arrays->mem_ipedge));
}
// Added by KA
if ( cudaSuccess != cudaMalloc((void**) &arrays->d_fdas_peak_list, arrays->mem_max_list_size)) printf("Allocation error in FDAS: d_fdas_peak_list\n");
// check allocated/free memory
size_t mfree, mtotal;
checkCudaErrors(cudaMemGetInfo ( &mfree, &mtotal ));
printf("\nMemory allocation finished: Total memory for this device: %.2f GB\nAvailable memory left on this device: %.2f GB \n", mtotal/gbyte, mfree/gbyte);
}
void fdas_free_gpu_arrays(fdas_gpuarrays *arrays, cmd_args *cmdargs)
{
checkCudaErrors(cudaFree(arrays->d_in_signal));
checkCudaErrors(cudaFree(arrays->d_fft_signal));
checkCudaErrors(cudaFree(arrays->d_ext_data));
checkCudaErrors(cudaFree(arrays->d_ffdot_pwr));
checkCudaErrors(cudaFree(arrays->d_kernel));
if(cmdargs->basic)
checkCudaErrors(cudaFree(arrays->d_ffdot_cpx));
if(cmdargs->kfft && cmdargs->inbin)
checkCudaErrors(cudaFree(arrays->ip_edge_points));
// Added by KA
cudaFree(arrays->d_fdas_peak_list);
}
/*
void fdas_create_acc_sig(fdas_new_acc_sig *acc_sig, cmd_args *cmdargs)
/* Create accelerated signal with given parameters in a float array */
/*
{
double t0, tau;
double omega = 2*M_PI*acc_sig->freq0;
double accel;
double tobs;
// gaussian distribution from C++ <random>
std::default_random_engine rgen;
std::normal_distribution<float> gdist(0.0,cmdargs->nsig);
tobs = (double) (TSAMP*acc_sig->nsamps);
accel = ((double)acc_sig->zval * SLIGHT) / (acc_sig->freq0*tobs*tobs);
printf("\n\npreparing test signal, observation time = %f s, %d nsamps f0 = %f Hz with %d harmonics\n", tobs, acc_sig->nsamps, acc_sig->freq0, acc_sig->nharms);
printf("\nz = %d acceleration = %f m/s^2\n", acc_sig->zval, accel);
acc_sig->acc_signal = (float*)malloc(acc_sig->nsamps*sizeof(float));
printf("\nNow creating accelerated signal with fc=%f, accel=%f, harmonics=%d, duty cycle=%.1f%%, noise=%d signal samples=%d, signal level: %.2f\n", acc_sig->freq0, accel, acc_sig->nharms, acc_sig->duty*100.0, cmdargs->nsig, acc_sig->nsamps,acc_sig->sigamp);
for ( int i=0; i<acc_sig->nsamps; ++i){
t0 = i*TSAMP;
tau = t0 + (t0*(accel*t0) / SLIGHT /2.0);
if (cmdargs->nsig!=0){
acc_sig->acc_signal[i] = gdist(rgen);
}
for (int j = 1; j <= acc_sig->nharms; ++j){
acc_sig->acc_signal[i] += (2.0/(j*M_PI)*sin(j*M_PI*acc_sig->duty))*acc_sig->sigamp*cos(j*omega*tau);
}
}
//Write to file
char afname[200];
sprintf(afname, "data/acc_sig_8192x%d_%dharms_%dduty_%.3fHz_%dz_%dnsigma.dat", acc_sig->mul, acc_sig->nharms, (int)( acc_sig->duty*100.0), acc_sig->freq0, acc_sig->zval, acc_sig->nsig );
write_output_file(afname, &acc_sig->acc_signal, acc_sig->nsamps );
free(acc_sig->acc_signal);
}
*/
void fdas_create_acc_kernels(cufftComplex* d_kernel, cmd_args *cmdargs )
{
/* Create kernel templates for the correlation technique (Ransom et al. 2002), */
/* and upload + FFT to GPU memory. */
/* Using functions from the original PRESTO accelsearch code */
/* (small adaptations for variables and remove normal interpolation management */
/* - input is already interpolated signal) */
/* -credit to Scott Ransom */
int ii;
int inbin = 1;
cufftComplex *h_kernel, *tempkern;
cufftHandle templates_plan; // for host kernel fft
int nrank = 1;
int n[] = {KERNLEN};
int idist = n[0], odist =n[0];
int *inembed = n, *onembed = n;
int istride =1, ostride = 1;
//allocate kernel array and prepare fft
h_kernel = (cufftComplex*) malloc(NKERN*KERNLEN*sizeof(float2));
// batched fft plan for the templates array
cufftPlanMany( &templates_plan, nrank, n, inembed , istride,
idist, onembed, ostride,
odist, CUFFT_C2C, NKERN);
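// Build one Fourier-domain response template per trial acceleration:
// z runs from -ZMAX to +ZMAX in steps of ACCEL_STEP across the NKERN templates.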
for (ii = 0; ii < NKERN; ii++){
double z = (-ZMAX+ii*ACCEL_STEP);
int halfwidth = presto_z_resp_halfwidth(z, LOWACC) ;
int numkern = 2 * halfwidth * inbin;
tempkern = presto_gen_z_response( z, numkern, inbin);
presto_place_complex_kernel(tempkern, numkern, (h_kernel+ii*KERNLEN), KERNLEN);
free(tempkern);
}
//!TEST!: replace templates here. Template width: numkern; padded width: KERNLEN
#ifdef FDAS_CONV_TEST
for (ii = 0; ii < NKERN; ii++){
int boxcar_width=ii*FDAS_TEST_FILTER_INCREMENT;
for(int f=0; f<KERNLEN; f++){
h_kernel[ii*KERNLEN + f].x = 0;
h_kernel[ii*KERNLEN + f].y = 0;
if(f<boxcar_width/2) h_kernel[ii*KERNLEN + f].x = 1.0;
if(f>=(KERNLEN-boxcar_width/2)) h_kernel[ii*KERNLEN + f].x = 1.0;
}
}
#endif
//!TEST!: replace templates here. Template width: numkern; padded width: KERNLEN
checkCudaErrors( cudaMemcpy( d_kernel, h_kernel, KERNLEN*sizeof(float2)* NKERN, cudaMemcpyHostToDevice) ); // upload kernels to GPU
#ifndef NOCUST
//use kernel's non-reordered fft
if (cmdargs->kfft)
customfft_fwd_temps_no_reorder<<<NKERN,KERNLEN>>>( d_kernel);
#endif
//use cuFFT to transform the templates
if (cmdargs->basic)
cufftExecC2C(templates_plan, d_kernel, d_kernel, CUFFT_FORWARD);
free(h_kernel);
}
void fdas_cuda_create_fftplans(fdas_cufftplan *fftplans, fdas_params *params) {
/*check plan memory overhead and create plans */
double mbyte = 1024.0*1024.0;
//double gbyte = mbyte*1024.0;
//set cufft plan parameters
size_t sig_worksize, real_worksize;
int nrank = 1;
int n[] = {KERNLEN};
int idist = n[0], odist =n[0];
int *inembed = n, *onembed = n;
int istride =1, ostride = 1;
//estimate plan memory for real fft
checkCudaErrors(cufftEstimate1d( params->nsamps, CUFFT_R2C, 1, &real_worksize));
printf("\nsignal real fft plan requires extra %f MB of memory\n", real_worksize / mbyte);
//estimate plan memory for forward fft
checkCudaErrors(cufftEstimateMany(nrank, n,inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, params->nblocks, &sig_worksize));
printf("\nsignal forward fft plan requires extra %f MB of memory\n the same plan is used for the inverse fft", sig_worksize / mbyte);
// real plan
size_t rworksize;
int rn[] = {params->nsamps};
int *rinembed = rn, *ronembed = rn;
int ridist = rn[0], rodist = params->rfftlen;
cufftCreate(&fftplans->realplan);
checkCudaErrors(cufftMakePlanMany( fftplans->realplan, nrank, rn, rinembed, istride, ridist, ronembed, ostride, rodist, CUFFT_R2C, 1, &rworksize));
cudaDeviceSynchronize();
getLastCudaError("\nCuda Error real fft plan\n");
// forward batched plan - same used for inverse
checkCudaErrors(cufftCreate(&fftplans->forwardplan));
checkCudaErrors(cufftMakePlanMany( fftplans->forwardplan, nrank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_C2C, params->nblocks, &sig_worksize));
cudaDeviceSynchronize();
getLastCudaError("\nCuda Error forward fft plan\n");
printf("\ncuFFT plans done \n");
}
void fdas_cuda_basic(fdas_cufftplan *fftplans, fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params)
{
/* Basic GPU fdas algorithm using cuFFT */
//int inbin;
int cthreads = TBSIZEX;
int cblocks = KERNLEN/TBSIZEX;
dim3 pwthreads(PTBSIZEX, PTBSIZEY);
dim3 pwblocks((params->sigblock / PTBSIZEX) + 1, NKERN/PTBSIZEY);
/* if (cmdargs->inbin)
inbin = 2;
else
inbin = 1;
*/
//real fft
#ifndef FDAS_CONV_TEST
cufftExecR2C(fftplans->realplan, gpuarrays->d_in_signal, gpuarrays->d_fft_signal);
#endif
#ifdef FDAS_CONV_TEST
float2 *f2temp;
float *ftemp;
ftemp = (float *)malloc(params->rfftlen*sizeof(float));
f2temp = (float2 *)malloc(params->rfftlen*sizeof(float2));
checkCudaErrors( cudaMemcpy(ftemp, gpuarrays->d_in_signal, (params->rfftlen)*sizeof(float), cudaMemcpyDeviceToHost));
for(int f=0; f<params->rfftlen; f++){
f2temp[f].x = ftemp[f];
f2temp[f].y = 0;
}
checkCudaErrors( cudaMemcpy(gpuarrays->d_fft_signal, f2temp, (params->rfftlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(ftemp);
free(f2temp);
#endif
if (cmdargs->norm){
// PRESTO deredden - remove red noise.
// TODO: replace with GPU version
float2 *fftsig;
fftsig = (float2*)malloc((params->rfftlen)*sizeof(float2));
checkCudaErrors( cudaMemcpy(fftsig, gpuarrays->d_fft_signal, (params->rfftlen)*sizeof(float2), cudaMemcpyDeviceToHost));
presto_dered_sig(fftsig, params->rfftlen);
checkCudaErrors( cudaMemcpy(gpuarrays->d_fft_signal, fftsig, (params->rfftlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(fftsig);
}
//overlap-copy
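// Overlap-save: split the FFT'd signal into nblocks segments of KERNLEN bins that
// overlap their neighbours by 'offset' bins, so the contaminated edges of each
// circular convolution can be dropped when the powers are concatenated later.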
cuda_overlap_copy<<<KERNLEN/64, 64 >>>(gpuarrays->d_ext_data, gpuarrays->d_fft_signal, params->sigblock, params->rfftlen, params->extlen, params->offset, params->nblocks );
if (cmdargs->norm){
// PRESTO block median normalization
// TODO: replace with GPU version
float2 *extsig;
extsig = (float2*)malloc((params->extlen)*sizeof(float2));
checkCudaErrors( cudaMemcpy(extsig, gpuarrays->d_ext_data, (params->extlen)*sizeof(float2), cudaMemcpyDeviceToHost));
for(int b=0; b<params->nblocks; ++b)
presto_norm(extsig+b*KERNLEN, KERNLEN);
checkCudaErrors( cudaMemcpy(gpuarrays->d_ext_data, extsig, (params->extlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(extsig);
}
//complex block fft
cufftExecC2C(fftplans->forwardplan, gpuarrays->d_ext_data, gpuarrays->d_ext_data, CUFFT_FORWARD);
//complex multiplication kernel
cuda_convolve_reg_1d_halftemps<<<cblocks, cthreads >>>( gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_cpx, params->extlen, params->scale);
//inverse fft
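// Each iteration inverse-transforms a matched pair of f-fdot planes
// (trial accelerations k and ZMAX-k); the central z = 0 plane is done separately below.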
for (int k=0; k < ZMAX/2; k++){
cufftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + k * params->extlen, gpuarrays->d_ffdot_cpx + k *params->extlen, CUFFT_INVERSE);
cufftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + (ZMAX-k) * params->extlen, gpuarrays->d_ffdot_cpx + (ZMAX-k) *params->extlen, CUFFT_INVERSE);
}
// z=0
cufftExecC2C(fftplans->forwardplan, gpuarrays->d_ffdot_cpx + ((ZMAX/2) * params->extlen), gpuarrays->d_ffdot_cpx + ((ZMAX/2) * params->extlen), CUFFT_INVERSE);
//power spectrum
if (cmdargs->inbin){
cuda_ffdotpow_concat_2d_inbin<<< pwblocks, pwthreads >>>(gpuarrays->d_ffdot_cpx, gpuarrays->d_ffdot_pwr, params->sigblock, params->offset, params->nblocks, params->extlen, params->siglen);
}
else{
cuda_ffdotpow_concat_2d <<< pwblocks, pwthreads >>>(gpuarrays->d_ffdot_cpx, gpuarrays->d_ffdot_pwr, params->sigblock, params->offset, params->nblocks, params->extlen, params->siglen);
}
}
#ifndef NOCUST
void fdas_cuda_customfft(fdas_cufftplan *fftplans, fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params) {
//int nthreads;
dim3 cblocks(params->nblocks, NKERN/2);
//real fft
#ifndef FDAS_CONV_TEST
cufftExecR2C(fftplans->realplan, gpuarrays->d_in_signal, gpuarrays->d_fft_signal);
#endif
#ifdef FDAS_CONV_TEST
float2 *f2temp;
float *ftemp;
ftemp = (float *)malloc(params->rfftlen*sizeof(float));
f2temp = (float2 *)malloc(params->rfftlen*sizeof(float2));
checkCudaErrors( cudaMemcpy(ftemp, gpuarrays->d_in_signal, (params->rfftlen)*sizeof(float), cudaMemcpyDeviceToHost));
for(int f=0; f<params->rfftlen; f++){
f2temp[f].x = ftemp[f];
f2temp[f].y = 0;
}
checkCudaErrors( cudaMemcpy(gpuarrays->d_fft_signal, f2temp, (params->rfftlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(ftemp);
free(f2temp);
#endif
if (cmdargs->norm){
// PRESTO deredden - remove red noise.
// TODO: replace with GPU version
float2 *fftsig;
fftsig = (float2*)malloc((params->rfftlen)*sizeof(float2));
checkCudaErrors( cudaMemcpy(fftsig, gpuarrays->d_fft_signal, (params->rfftlen)*sizeof(float2), cudaMemcpyDeviceToHost));
presto_dered_sig(fftsig, params->rfftlen);
checkCudaErrors( cudaMemcpy(gpuarrays->d_fft_signal, fftsig, (params->rfftlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(fftsig);
}
//overlap-copy
cuda_overlap_copy_smallblk<<<params->nblocks, KERNLEN >>>(gpuarrays->d_ext_data, gpuarrays->d_fft_signal, params->sigblock, params->rfftlen, params->extlen, params->offset, params->nblocks );
if (cmdargs->norm){
// PRESTO block median normalization
// TODO: replace with GPU version
float2 *extsig;
extsig = (float2*)malloc((params->extlen)*sizeof(float2));
checkCudaErrors( cudaMemcpy(extsig, gpuarrays->d_ext_data, (params->extlen)*sizeof(float2), cudaMemcpyDeviceToHost));
for(int b=0; b<params->nblocks; ++b)
presto_norm(extsig+b*KERNLEN, KERNLEN);
checkCudaErrors( cudaMemcpy(gpuarrays->d_ext_data, extsig, (params->extlen)*sizeof(float2), cudaMemcpyHostToDevice));
free(extsig);
}
// Custom FFT convolution kernel
if(cmdargs->inbin){
cuda_convolve_customfft_wes_no_reorder02_inbin<<< params->nblocks, KERNLEN >>>( gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, params->sigblock, params->extlen, params->siglen, params->offset, params->scale, gpuarrays->ip_edge_points);
}
else{
//cuda_convolve_customfft_wes_no_reorder02<<< params->nblocks, KERNLEN >>>( gpuarrays->d_kernel, gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, params->sigblock, params->extlen, params->siglen, params->offset, params->scale);
//-------------------------------------------
dim3 gridSize(1, 1, 1);
dim3 blockSize(1, 1, 1);
/*
//-------------------------------------------
//Two elements per thread
gridSize.x = params->nblocks;
gridSize.y = 1;
gridSize.z = 1;
blockSize.x = KERNLEN/2;
GPU_CONV_kFFT_mk11_2elem_2v<<<gridSize,blockSize>>>(gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, gpuarrays->d_kernel, params->sigblock, params->offset, params->nblocks, params->scale);
*/
//-------------------------------------------
//Four elements per thread
gridSize.x = params->nblocks;
gridSize.y = 1;
gridSize.z = 1;
blockSize.x = KERNLEN/4;
GPU_CONV_kFFT_mk11_4elem_2v<<<gridSize,blockSize>>>(gpuarrays->d_ext_data, gpuarrays->d_ffdot_pwr, gpuarrays->d_kernel, params->sigblock, params->offset, params->nblocks, params->scale);
}
}
#endif
void fdas_write_list(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float *h_MSD, float dm_low, int dm_count, float dm_step, unsigned int list_size){
int ibin=1;
if (cmdargs->inbin) ibin=2;
double tobs = (double)params->tsamp* (double)params->nsamps*ibin;
if( !isnan(h_MSD[0]) && !isinf(h_MSD[0]) && !isnan(h_MSD[1]) && !isinf(h_MSD[1]) ){
printf("Number of peaks:%d; mean:%f; strdev:%f\n", list_size, h_MSD[0], h_MSD[1]);
float *h_fdas_peak_list = (float*)malloc(list_size*4*sizeof(float));
checkCudaErrors(cudaMemcpy(h_fdas_peak_list, gpuarrays->d_fdas_peak_list, list_size*4*sizeof(float), cudaMemcpyDeviceToHost));
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
sprintf(pfname, "acc_list_%f.dat", dm_low + ((float)dm_count)*dm_step);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
}
for(int f=0; f<list_size; f++){
int j;
double a, acc, acc1, jfreq, pow, SNR;
a = h_fdas_peak_list[4*f];
j = (int) h_fdas_peak_list[4*f + 1];
pow = h_fdas_peak_list[4*f + 2];
SNR = (pow-h_MSD[0])/h_MSD[1];
jfreq = (double)(j) / tobs;
acc = (double) (ZMAX - a* ACCEL_STEP);
acc1 = acc*SLIGHT / jfreq / tobs / tobs;
fprintf(fp_c, "%.2f\t%.3f\t%u\t%.3f\t%.3f\t%.3f\n", acc, acc1, j , jfreq, pow, SNR);
}
fclose(fp_c);
free(h_fdas_peak_list);
}
else {
printf("Error: mean or standard deviation was NaN or Inf!\n");
}
}
void fdas_write_ffdot(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float dm_low, int dm_count, float dm_step ) {
int ibin=1;
if (cmdargs->inbin)
ibin=2;
// Download, threshold and write ffdot data to file
//int nsamps = params->nsamps;
printf("\n\nWrite data for signal with %d samples\nf-fdot size=%u\n",params->nsamps, params->ffdotlen);
float *h_ffdotpwr = (float*)malloc(params->ffdotlen* sizeof(float));
//download data
checkCudaErrors(cudaMemcpy(h_ffdotpwr, gpuarrays->d_ffdot_pwr, params->ffdotlen*sizeof(float), cudaMemcpyDeviceToHost));
// calculating statistics
double total = 0.0;
double mean;
double stddev;
// unsigned int j;
for ( int j = 0; j < params->ffdotlen; ++j){
total += (double)(h_ffdotpwr[j]);
if(isnan(total)){
printf("\nnan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
mean = total / ((double)(params->ffdotlen));
printf("\ntotal ffdot:%lf\tmean ffdot: %lf", total, mean);
// Calculate standard deviation
total = 0.0;
for ( int j = 0; j < params->ffdotlen; ++j){
total += ((double)h_ffdotpwr[j] - mean ) * ((double)h_ffdotpwr[j] - mean);
if(isnan(total)||isinf(total)){
printf("\ninf/nan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
stddev = sqrt(abs(total) / (double)(params->ffdotlen - 1));
printf("\nmean ffdot: %f\tstd ffdot: %lf\n", mean, stddev);
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
// char *infilename;
// infilename = basename(cmdargs->afname);
// filename needs to be acc_dm_%f, dm_low[i] + ((float)dm_count)*dm_step[i]
//sprintf(pfname, "%s/out_inbin%d_%s",dirname,ibin,infilename);
sprintf(pfname, "acc_%f.dat", dm_low + ((float)dm_count)*dm_step);
printf("\nwriting results to file %s\n",pfname);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
exit(1);
}
float pow, sigma;
double tobs = (double)params->tsamp * (double)params->nsamps*ibin;
unsigned int numindep = params->siglen*(NKERN+1)*ACCEL_STEP/6.95; // taken from PRESTO
//write to file
printf("\nWriting ffdot data to file...\n");
for(int a = 0; a < NKERN; a++) {
double acc = (double) (ZMAX - a* ACCEL_STEP);
for( int j = 0; j < ibin*params->siglen; j++){
pow = h_ffdotpwr[a * ibin*params->siglen + j]; //(h_ffdotpwr[a * params->siglen + j]-mean)/stddev;
if( pow > cmdargs->thresh) {
sigma = candidate_sigma(pow, cmdargs->nharms, numindep);//power, number of harmonics, number of independent searches=1...2^harms
// sigma=1.0;
double jfreq = (double)(j) / tobs;
double acc1 = acc*SLIGHT / jfreq / tobs / tobs;
fprintf(fp_c, "%.2f\t%.3f\t%u\t%.3f\t%.3f\t%.3f\n", acc, acc1, j , jfreq, pow, sigma);
}
}
}
fclose(fp_c);
printf("\nFinished writing file %s\n",pfname);
free(h_ffdotpwr);
}
void fdas_write_test_ffdot(fdas_gpuarrays *gpuarrays, cmd_args *cmdargs, fdas_params *params, float dm_low, int dm_count, float dm_step ) {
int ibin=1;
if (cmdargs->inbin)
ibin=2;
/* Download, threshold and write ffdot data to file */
//int nsamps = params->nsamps;
printf("\n\nWrite data for signal with %d samples\nf-fdot size=%u\n",params->nsamps, params->ffdotlen);
float *h_ffdotpwr = (float*)malloc(params->ffdotlen* sizeof(float));
//download data
checkCudaErrors(cudaMemcpy(h_ffdotpwr, gpuarrays->d_ffdot_pwr, params->ffdotlen*sizeof(float), cudaMemcpyDeviceToHost));
// calculating statistics
double total = 0.0;
double mean;
double stddev;
// unsigned int j;
for ( int j = 0; j < params->ffdotlen; ++j){
total += (double)(h_ffdotpwr[j]);
if(isnan(total)){
printf("\nnan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
mean = total / ((double)(params->ffdotlen));
printf("\ntotal ffdot:%lf\tmean ffdot: %lf", total, mean);
// Calculate standard deviation
total = 0.0;
for ( int j = 0; j < params->ffdotlen; ++j){
total += ((double)h_ffdotpwr[j] - mean ) * ((double)h_ffdotpwr[j] - mean);
if(isnan(total)||isinf(total)){
printf("\ninf/nan detected during sum for mean at j=%d\nValue at j:%f\n",j,h_ffdotpwr[j]);
exit(1);
}
}
stddev = sqrt(abs(total) / (double)(params->ffdotlen - 1));
printf("\nmean ffdot: %f\tstd ffdot: %lf\n", mean, stddev);
//prepare file
const char *dirname= "output_data";
struct stat st = {0};
if (stat(dirname, &st) == -1) {
printf("\nDirectory %s does not exist, creating...\n", dirname);
mkdir(dirname, 0700);
}
FILE *fp_c;
char pfname[200];
sprintf(pfname, "acc_fdas_conv_test.dat");
printf("\nwriting results to file %s\n",pfname);
if ((fp_c=fopen(pfname, "w")) == NULL) {
fprintf(stderr, "Error opening %s file for writing: %s\n",pfname, strerror(errno));
exit(1);
}
float pow;
//write to file
printf("\nWriting ffdot data to file...\n");
for(int a = 0; a < NKERN; a++) {
for( int j = 0; j < ibin*params->siglen; j++){
pow = h_ffdotpwr[a * ibin*params->siglen + j]; //(h_ffdotpwr[a * params->siglen + j]-mean)/stddev;
fprintf(fp_c, "%u\t%u\t%f\n", a, j, pow);
}
}
fclose(fp_c);
printf("\nFinished writing file %s\n",pfname);
free(h_ffdotpwr);
}
|
acb6493ce69ea385fad7986be01a3fad19db1ef5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/edit_distance_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void FillFirstRow(T* dist, const int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N + 1) {
dist[idx] = idx;
}
}
template <typename T>
__global__ void FillFirstColumn(T* dist, const int M, const int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < M + 1) {
dist[idx * (N + 1)] = idx;
}
}
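// Levenshtein updates one anti-diagonal of the (M+1) x (N+1) DP table per launch:
// dist[row][col] = min(deletion, insertion, substitution-with-cost), and since all
// three predecessors lie on earlier anti-diagonals, every cell on the current
// anti-diagonal can be computed in parallel.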
template <typename T>
__global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2,
const int M, const int N, const int start) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = N;
int index = start + idx * offset;
int row = index / (N + 1);
int col = index % (N + 1);
if (row > 0 && col > 0 && row < M + 1 && col < N + 1) {
int cost = x1[row - 1] == x2[col - 1] ? 0 : 1;
int dels = dist[(row - 1) * (N + 1) + col] + 1;
int ins = dist[row * (N + 1) + col - 1] + 1;
int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost;
dist[index] = min(dels, min(ins, subs));
}
}
template <typename T>
__global__ void SetOutput(T* out, const T* dist, const int M, const int N,
bool normalized) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx == 0) {
out[0] = normalized ? dist[M * (N + 1) + N] / N : dist[M * (N + 1) + N];
}
}
template <typename Place, typename T>
class EditDistanceGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out_t = ctx.Output<framework::Tensor>("Out");
auto* x1_t = ctx.Input<framework::LoDTensor>("Hyps");
auto* x2_t = ctx.Input<framework::LoDTensor>("Refs");
auto* sequence_num = ctx.Output<framework::Tensor>("SequenceNum");
sequence_num->mutable_data<int64_t>(ctx.GetPlace());
auto normalized = ctx.Attr<bool>("normalized");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
auto hyp_lod = x1_t->lod()[0];
auto ref_lod = x2_t->lod()[0];
PADDLE_ENFORCE(
hyp_lod.size() == ref_lod.size(),
"Input(Hyps) and Input(Refs) must have the same batch size.");
for (size_t i = 1; i < ref_lod.size(); ++i) {
PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
"Reference string %d is empty.", i);
}
const size_t num_strs = hyp_lod.size() - 1;
math::SetConstant<platform::CUDADeviceContext, int64_t> set_constant;
set_constant(ctx.template device_context<platform::CUDADeviceContext>(),
sequence_num, static_cast<int64_t>(num_strs));
out_t->Resize({static_cast<int64_t>(num_strs), 1});
out_t->mutable_data<T>(ctx.GetPlace());
auto out = out_t->data<T>();
T distance = 0.0;
for (size_t num = 0; num < num_strs; num++) {
auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]);
auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]);
if (m == 0 || n == 0) {
distance = ::max(m, n);
if (normalized) {
PADDLE_ENFORCE(n > 0,
"The reference string (#%d) cannot be empty "
"when Attr(normalized) is enabled.",
n);
distance = distance / n;
}
memory::Copy(boost::get<Place>(ctx.GetPlace()), out + num,
platform::CPUPlace(), &distance, sizeof(T), stream);
} else {
framework::Tensor dist_t;
dist_t.Resize({m + 1, n + 1});
dist_t.mutable_data<T>(ctx.GetPlace());
auto dist = dist_t.data<T>();
auto x1 = x1_t->data<int64_t>() + hyp_lod[num];
auto x2 = x2_t->data<int64_t>() + ref_lod[num];
hipLaunchKernelGGL(( FillFirstColumn<T>), dim3(1 + m / PADDLE_CUDA_NUM_THREADS),
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, m, n);
hipLaunchKernelGGL(( FillFirstRow<T>), dim3(1 + n / PADDLE_CUDA_NUM_THREADS),
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, n);
// Compute the elements of the distance matrix in the anti-diagonal direction
for (int64_t slice = 2; slice < m + n + 1; ++slice) {
int z_m = slice < m + 1 ? 0 : slice - m;
int z_n = slice < n + 1 ? 0 : slice - n;
int size = slice - (z_m + z_n) + 1; // number of elements in the same
// anti-diagonal line to update
// the start index at which computes from
int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1;
hipLaunchKernelGGL(( Levenshtein<T>), dim3(1 + (size - 1) / PADDLE_CUDA_NUM_THREADS),
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, x1, x2,
m, n, start);
}
hipLaunchKernelGGL(( SetOutput<T>), dim3(1), dim3(1), 0, stream, out + num, dist, m, n, normalized);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
edit_distance,
ops::EditDistanceGPUKernel<paddle::platform::CUDAPlace, float>);
| acb6493ce69ea385fad7986be01a3fad19db1ef5.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/edit_distance_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cuda_helper.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
template <typename T>
__global__ void FillFirstRow(T* dist, const int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < N + 1) {
dist[idx] = idx;
}
}
template <typename T>
__global__ void FillFirstColumn(T* dist, const int M, const int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < M + 1) {
dist[idx * (N + 1)] = idx;
}
}
template <typename T>
__global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2,
const int M, const int N, const int start) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int offset = N;
int index = start + idx * offset;
int row = index / (N + 1);
int col = index % (N + 1);
if (row > 0 && col > 0 && row < M + 1 && col < N + 1) {
int cost = x1[row - 1] == x2[col - 1] ? 0 : 1;
int dels = dist[(row - 1) * (N + 1) + col] + 1;
int ins = dist[row * (N + 1) + col - 1] + 1;
int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost;
dist[index] = min(dels, min(ins, subs));
}
}
template <typename T>
__global__ void SetOutput(T* out, const T* dist, const int M, const int N,
bool normalized) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx == 0) {
out[0] = normalized ? dist[M * (N + 1) + N] / N : dist[M * (N + 1) + N];
}
}
template <typename Place, typename T>
class EditDistanceGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out_t = ctx.Output<framework::Tensor>("Out");
auto* x1_t = ctx.Input<framework::LoDTensor>("Hyps");
auto* x2_t = ctx.Input<framework::LoDTensor>("Refs");
auto* sequence_num = ctx.Output<framework::Tensor>("SequenceNum");
sequence_num->mutable_data<int64_t>(ctx.GetPlace());
auto normalized = ctx.Attr<bool>("normalized");
auto stream = reinterpret_cast<const platform::CUDADeviceContext&>(
ctx.device_context())
.stream();
auto hyp_lod = x1_t->lod()[0];
auto ref_lod = x2_t->lod()[0];
PADDLE_ENFORCE(
hyp_lod.size() == ref_lod.size(),
"Input(Hyps) and Input(Refs) must have the same batch size.");
for (size_t i = 1; i < ref_lod.size(); ++i) {
PADDLE_ENFORCE(ref_lod[i] > ref_lod[i - 1],
"Reference string %d is empty.", i);
}
const size_t num_strs = hyp_lod.size() - 1;
math::SetConstant<platform::CUDADeviceContext, int64_t> set_constant;
set_constant(ctx.template device_context<platform::CUDADeviceContext>(),
sequence_num, static_cast<int64_t>(num_strs));
out_t->Resize({static_cast<int64_t>(num_strs), 1});
out_t->mutable_data<T>(ctx.GetPlace());
auto out = out_t->data<T>();
T distance = 0.0;
for (size_t num = 0; num < num_strs; num++) {
auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]);
auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]);
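// Degenerate case: if either string is empty the distance is just the other
// string's length (divided by the reference length when normalization is on).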
if (m == 0 || n == 0) {
distance = std::max(m, n);
if (normalized) {
PADDLE_ENFORCE(n > 0,
"The reference string (#%d) cannot be empty "
"when Attr(normalized) is enabled.",
n);
distance = distance / n;
}
memory::Copy(boost::get<Place>(ctx.GetPlace()), out + num,
platform::CPUPlace(), &distance, sizeof(T), stream);
} else {
framework::Tensor dist_t;
dist_t.Resize({m + 1, n + 1});
dist_t.mutable_data<T>(ctx.GetPlace());
auto dist = dist_t.data<T>();
auto x1 = x1_t->data<int64_t>() + hyp_lod[num];
auto x2 = x2_t->data<int64_t>() + ref_lod[num];
FillFirstColumn<T><<<1 + m / PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, m, n);
FillFirstRow<T><<<1 + n / PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, n);
// Compute the elements of the distance matrix in the anti-diagonal direction
for (int64_t slice = 2; slice < m + n + 1; ++slice) {
int z_m = slice < m + 1 ? 0 : slice - m;
int z_n = slice < n + 1 ? 0 : slice - n;
int size = slice - (z_m + z_n) + 1; // number of elements in the same
// anti-diagonal line to update
// the start index at which computes from
int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1;
Levenshtein<T><<<1 + (size - 1) / PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, x1, x2,
m, n, start);
}
SetOutput<T><<<1, 1, 0, stream>>>(out + num, dist, m, n, normalized);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
edit_distance,
ops::EditDistanceGPUKernel<paddle::platform::CUDAPlace, float>);
|
41dc5ec2b726b39152ccf236c632ebceffe3f2f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp == (+1.1633E-35f / ceilf(var_3 / +1.3331E35f * var_4))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp = floorf(-1.1702E36f);
for (int i=0; i < var_5; ++i) {
comp = logf((+0.0f * (var_6 / (var_7 - -0.0f / -1.0583E13f))));
}
if (comp == (-1.2608E34f - var_8)) {
float tmp_1 = +1.3991E35f;
float tmp_2 = +0.0f / (+1.4080E-43f + var_9);
comp = tmp_2 / tmp_1 / sqrtf((-1.7990E36f / var_10));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
hipDeviceSynchronize();
return 0;
}
| 41dc5ec2b726b39152ccf236c632ebceffe3f2f5.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp == (+1.1633E-35f / ceilf(var_3 / +1.3331E35f * var_4))) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
comp = floorf(-1.1702E36f);
for (int i=0; i < var_5; ++i) {
comp = logf((+0.0f * (var_6 / (var_7 - -0.0f / -1.0583E13f))));
}
if (comp == (-1.2608E34f - var_8)) {
float tmp_1 = +1.3991E35f;
float tmp_2 = +0.0f / (+1.4080E-43f + var_9);
comp = tmp_2 / tmp_1 / sqrtf((-1.7990E36f / var_10));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
cudaDeviceSynchronize();
return 0;
}
|
dfd991c96ae1a2ad803813a957786f1a2fe00f93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
/* Hint 7 */
// this variable is used by device
__device__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
/* Hint 5 */
// this function is called by host and executed by device
__global__ void sobel (unsigned char* s, unsigned char* t, unsigned *h, unsigned *w, unsigned *c) {
int x, y, i, v, u;
int R, G, B;
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
int width = *w;
int height = *h;
int channels = *c;
y = blockDim.x * blockIdx.x + threadIdx.x ;
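// One thread per image row: thread y sweeps across every column x of its row.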
// x = threadIdx.x;
// i = threadIdx.y;
if(y >= height){ return;}
/* Hint 6 */
// parallel job by blockIdx, blockDim, threadIdx
// for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = s[channels * (width * (y+v) + (x+u)) + 2];
G = s[channels * (width * (y+v) + (x+u)) + 1];
B = s[channels * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
// __syncthreads();
}
// }
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
/* Hint 1 */
// hipMalloc(...) for device src and device dst
unsigned *d_h, *d_w, *d_c;
unsigned char *h_s, *h_t;
// int *d_mask = [MASK_N * MASK_X * MASK_Y];
hipMalloc((void **) &d_h, sizeof(unsigned));
hipMalloc((void **) &d_w, sizeof(unsigned));
hipMalloc((void **) &d_c, sizeof(unsigned));
hipMalloc((void **) &h_s, height * width * channels * sizeof(unsigned char));
hipMalloc((void **) &h_t, height * width * channels * sizeof(unsigned char));
// hipMalloc((void **) &d_mask, sizeof(MASK_N * MASK_X * MASK_Y * sizeof(int)));
/* Hint 2 */
// hipMemcpy(...) copy source image to device (filter matrix if necessary)
hipMemcpy(d_h, &height, sizeof(unsigned), hipMemcpyHostToDevice);
hipMemcpy(d_w, &width, sizeof(unsigned), hipMemcpyHostToDevice);
hipMemcpy(d_c, &channels, sizeof(unsigned), hipMemcpyHostToDevice);
hipMemcpy(h_s, host_s, height * width * channels * sizeof(unsigned char), hipMemcpyHostToDevice);
// hipMemcpy(h_t, &host_t, height * width * channels * sizeof(unsigned char), hipMemcpyHostToDevice);
/* Hint 3 */
// accelerate this function
int n_threads = 64;
int n_blocks = height / n_threads + 1;
// dim3 threadsPerBlock(width, MASK_N);
hipLaunchKernelGGL(( sobel), dim3(n_blocks), dim3(n_threads) , 0, 0, h_s, h_t, d_h, d_w, d_c);
/* Hint 4 */
// hipMemcpy(...) copy result image to host
hipMemcpy(host_t, h_t, height * width * channels * sizeof(unsigned char), hipMemcpyDeviceToHost);
write_png(argv[2], host_t, height, width, channels);
hipFree(d_c);
hipFree(d_w);
hipFree(d_h);
hipFree(h_s);
hipFree(h_t);
return 0;
}
| dfd991c96ae1a2ad803813a957786f1a2fe00f93.cu | #include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
/* Hint 7 */
// this variable is used by device
__device__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
/* Hint 5 */
// this function is called by host and executed by device
__global__ void sobel (unsigned char* s, unsigned char* t, unsigned *h, unsigned *w, unsigned *c) {
int x, y, i, v, u;
int R, G, B;
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
int width = *w;
int height = *h;
int channels = *c;
y = blockDim.x * blockIdx.x + threadIdx.x ;
// x = threadIdx.x;
// i = threadIdx.y;
if(y >= height){ return;}
/* Hint 6 */
// parallel job by blockIdx, blockDim, threadIdx
// for (y = 0; y < height; ++y) {
for (x = 0; x < width; ++x) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = s[channels * (width * (y+v) + (x+u)) + 2];
G = s[channels * (width * (y+v) + (x+u)) + 1];
B = s[channels * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
t[channels * (width * y + x) + 2] = cR;
t[channels * (width * y + x) + 1] = cG;
t[channels * (width * y + x) + 0] = cB;
// __syncthreads();
}
// }
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
/* Hint 1 */
// cudaMalloc(...) for device src and device dst
unsigned *d_h, *d_w, *d_c;
unsigned char *h_s, *h_t;
// int *d_mask = [MASK_N * MASK_X * MASK_Y];
cudaMalloc((void **) &d_h, sizeof(unsigned));
cudaMalloc((void **) &d_w, sizeof(unsigned));
cudaMalloc((void **) &d_c, sizeof(unsigned));
cudaMalloc((void **) &h_s, height * width * channels * sizeof(unsigned char));
cudaMalloc((void **) &h_t, height * width * channels * sizeof(unsigned char));
// cudaMalloc((void **) &d_mask, sizeof(MASK_N * MASK_X * MASK_Y * sizeof(int)));
/* Hint 2 */
// cudaMemcpy(...) copy source image to device (filter matrix if necessary)
cudaMemcpy(d_h, &height, sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(d_w, &width, sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, &channels, sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemcpy(h_s, host_s, height * width * channels * sizeof(unsigned char), cudaMemcpyHostToDevice);
// cudaMemcpy(h_t, &host_t, height * width * channels * sizeof(unsigned char), cudaMemcpyHostToDevice);
/* Hint 3 */
// accelerate this function
int n_threads = 64;
int n_blocks = height / n_threads + 1;
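// Enough 64-thread blocks so every image row gets its own thread; rows past
// 'height' are rejected by the bounds check inside the kernel.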
// dim3 threadsPerBlock(width, MASK_N);
sobel<<< n_blocks, n_threads >>>(h_s, h_t, d_h, d_w, d_c);
/* Hint 4 */
// cudaMemcpy(...) copy result image to host
cudaMemcpy(host_t, h_t, height * width * channels * sizeof(unsigned char), cudaMemcpyDeviceToHost);
write_png(argv[2], host_t, height, width, channels);
cudaFree(d_c);
cudaFree(d_w);
cudaFree(d_h);
cudaFree(h_s);
cudaFree(h_t);
return 0;
}
|
ce629ed44682220373c63bd1b9dd938ec4c5a99e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 2, input, target);
input = THCTensor_(newContiguous)(state, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
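// A 1-D input is a single example handled by one block; a 2-D input launches
// one block per example (row) of the batch.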
if (input->dim() == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
THCTensor_(resize1d)(state, output, 1);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size(0),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size(0),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(hipGetLastError());
}
else if (input->dim() == 2)
{
int nframe = input->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == nframe), 3,
"inconsistent target size");
dim3 blocks(input->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == Reduction::None)
{
THCTensor_(resize1d)(state, output, input->size(0));
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
THCudaCheck(hipGetLastError());
}
else
{
THCTensor_(resize1d)(state, output, 1);
THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
input->size(0), input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(hipGetLastError());
float sum = THCTensor_(sumall)(state, output_);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
THCTensor_(free)(state, output_);
}
}
else
{
AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
void THNN_(MultiMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 3, input, gradInput, target);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->dim() == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size(0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size(0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(hipGetLastError());
}
else if (input->dim() == 2)
{
int nframe = gradInput->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == nframe), 3,
"inconsistent target size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(hipGetLastError());
}
else
{
AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
if(weights)
THCTensor_(free)(state, weights);
}
#endif
| ce629ed44682220373c63bd1b9dd938ec4c5a99e.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
void THNN_(MultiMarginCriterion_updateOutput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *output,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 2, input, target);
input = THCTensor_(newContiguous)(state, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->dim() == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
THCTensor_(resize1d)(state, output, 1);
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size(0),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, input->size(0),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->dim() == 2)
{
int nframe = input->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == nframe), 3,
"inconsistent target size");
dim3 blocks(input->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (reduction == Reduction::None)
{
THCTensor_(resize1d)(state, output, input->size(0));
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
false,
margin
);
}
THCudaCheck(cudaGetLastError());
}
else
{
THCTensor_(resize1d)(state, output, 1);
THCTensor *output_ = THCTensor_(newWithSize1d)(state, input->size(0)); // tmp output buffer
if (p == 1)
{
cunn_MultiMarginCriterion_updateOutput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateOutput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output_),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
input->size(0), input->size(1),
reduction == Reduction::ElementwiseMean,
margin
);
}
THCudaCheck(cudaGetLastError());
float sum = THCTensor_(sumall)(state, output_);
THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, real>::to(sum));
THCTensor_(free)(state, output_);
}
}
else
{
AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
}
THCTensor_(free)(state, input);
if(weights)
THCTensor_(free)(state, weights);
}
void THNN_(MultiMarginCriterion_updateGradInput)(
THCState *state,
THCTensor *input,
THCIndexTensor *target,
THCTensor *gradOutput,
THCTensor *gradInput,
int64_t reduction,
int p,
THCTensor *weights,
accreal margin_)
{
real margin = ScalarConvert<accreal, real>::to(margin_);
THCUNN_assertSameGPU(state, 3, input, gradInput, target);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
if(weights)
weights = THCTensor_(newContiguous)(state, weights);
if (input->dim() == 1)
{
dim3 blocks(1);
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size(0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
1, gradInput->size(0),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(cudaGetLastError());
}
else if (input->dim() == 2)
{
int nframe = gradInput->size(0);
THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == nframe), 3,
"inconsistent target size");
dim3 blocks(gradInput->size(0));
dim3 threads(MULTIMARGIN_THREADS);
if (p == 1)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<1, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
else if (p == 2)
{
cunn_MultiMarginCriterion_updateGradInput_kernel<2, real, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, gradOutput),
THCTensor_(data)(state, input),
THCIndexTensor_(data)(state, target),
weights ? THCTensor_(data)(state, weights) : NULL,
nframe, gradInput->size(1),
reduction == Reduction::ElementwiseMean,
margin,
reduction != Reduction::None
);
}
THCudaCheck(cudaGetLastError());
}
else
{
AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
}
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
if(weights)
THCTensor_(free)(state, weights);
}
#endif
|
223e956fac7b65939793ae7d86861e746cb1709b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021-2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/binary/binary_op.h"
#include "cunumeric/binary/binary_op_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
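// Two element-wise paths: dense_kernel indexes raw pointers with a flat thread id when the
// accessors are contiguous over the rectangle, while generic_kernel unflattens the thread id
// through the pitches to support arbitrary accessor layouts.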
template <typename Function, typename LHS, typename RHS1, typename RHS2>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
dense_kernel(size_t volume, Function func, LHS* out, const RHS1* in1, const RHS2* in2)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
out[idx] = func(in1[idx], in2[idx]);
}
template <typename Function,
typename WriteAcc,
typename ReadAcc1,
typename ReadAcc2,
typename Pitches,
typename Rect>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume,
Function func,
WriteAcc out,
ReadAcc1 in1,
ReadAcc2 in2,
Pitches pitches,
Rect rect)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
auto point = pitches.unflatten(idx, rect.lo);
out[point] = func(in1[point], in2[point]);
}
template <BinaryOpCode OP_CODE, Type::Code CODE, int DIM>
struct BinaryOpImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> {
using OP = BinaryOp<OP_CODE, CODE>;
using RHS1 = legate_type_of<CODE>;
using RHS2 = rhs2_of_binary_op<OP_CODE, CODE>;
using LHS = std::result_of_t<OP(RHS1, RHS2)>;
void operator()(OP func,
AccessorWO<LHS, DIM> out,
AccessorRO<RHS1, DIM> in1,
AccessorRO<RHS2, DIM> in2,
const Pitches<DIM - 1>& pitches,
const Rect<DIM>& rect,
bool dense) const
{
size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
auto stream = get_cached_stream();
if (dense) {
auto outptr = out.ptr(rect);
auto in1ptr = in1.ptr(rect);
auto in2ptr = in2.ptr(rect);
hipLaunchKernelGGL(( dense_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream, volume, func, outptr, in1ptr, in2ptr);
} else {
hipLaunchKernelGGL(( generic_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, stream,
volume, func, out, in1, in2, pitches, rect);
}
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void BinaryOpTask::gpu_variant(TaskContext& context)
{
binary_op_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
| 223e956fac7b65939793ae7d86861e746cb1709b.cu | /* Copyright 2021-2022 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cunumeric/binary/binary_op.h"
#include "cunumeric/binary/binary_op_template.inl"
#include "cunumeric/cuda_help.h"
namespace cunumeric {
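// Two element-wise paths: dense_kernel indexes raw pointers with a flat thread id when the
// accessors are contiguous over the rectangle, while generic_kernel unflattens the thread id
// through the pitches to support arbitrary accessor layouts.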
template <typename Function, typename LHS, typename RHS1, typename RHS2>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
dense_kernel(size_t volume, Function func, LHS* out, const RHS1* in1, const RHS2* in2)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
out[idx] = func(in1[idx], in2[idx]);
}
template <typename Function,
typename WriteAcc,
typename ReadAcc1,
typename ReadAcc2,
typename Pitches,
typename Rect>
static __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
generic_kernel(size_t volume,
Function func,
WriteAcc out,
ReadAcc1 in1,
ReadAcc2 in2,
Pitches pitches,
Rect rect)
{
const size_t idx = global_tid_1d();
if (idx >= volume) return;
auto point = pitches.unflatten(idx, rect.lo);
out[point] = func(in1[point], in2[point]);
}
template <BinaryOpCode OP_CODE, Type::Code CODE, int DIM>
struct BinaryOpImplBody<VariantKind::GPU, OP_CODE, CODE, DIM> {
using OP = BinaryOp<OP_CODE, CODE>;
using RHS1 = legate_type_of<CODE>;
using RHS2 = rhs2_of_binary_op<OP_CODE, CODE>;
using LHS = std::result_of_t<OP(RHS1, RHS2)>;
void operator()(OP func,
AccessorWO<LHS, DIM> out,
AccessorRO<RHS1, DIM> in1,
AccessorRO<RHS2, DIM> in2,
const Pitches<DIM - 1>& pitches,
const Rect<DIM>& rect,
bool dense) const
{
size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
auto stream = get_cached_stream();
if (dense) {
auto outptr = out.ptr(rect);
auto in1ptr = in1.ptr(rect);
auto in2ptr = in2.ptr(rect);
dense_kernel<<<blocks, THREADS_PER_BLOCK, 0, stream>>>(volume, func, outptr, in1ptr, in2ptr);
} else {
generic_kernel<<<blocks, THREADS_PER_BLOCK, 0, stream>>>(
volume, func, out, in1, in2, pitches, rect);
}
CHECK_CUDA_STREAM(stream);
}
};
/*static*/ void BinaryOpTask::gpu_variant(TaskContext& context)
{
binary_op_template<VariantKind::GPU>(context);
}
} // namespace cunumeric
|
34dbe0933bc5fb5b2d758442a533227be569dd43.hip | // !!! This is a file automatically generated by hipify!!!
#define NO_QT
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <iostream>
#include <render/mlmRender/mlmRender.h>
#include <sstream>
#include <texture_types.h>
#include <utility/math.h>
#include <vector_functions.h>
#include <hip/hip_vector_types.h>
//#define DEBUG_AABB
#pragma region MACROS
#define SMRAY rayCache[threadIdx.x]
#define IDRAY rayid[threadIdx.x]
#define IDRAY2D rayid[threadIdx.x + blockDim.x * threadIdx.y]
#ifdef __INTELLISENSE__
#define gridDim int3{32,1,1}
#define blockDim int3{32,1,1}
#define threadIdx int3{0,0,0}
#define blockIdx int3{0,0,0}
#endif
#define SMRAY_DIR_X SMRAY.direction.x
#define SMRAY_DIR_Y SMRAY.direction.y
#define SMRAY_DIR_Z SMRAY.direction.z
#define SMRAY_ORIG_X SMRAY.origin.x
#define SMRAY_ORIG_Y SMRAY.origin.y
#define SMRAY_ORIG_Z SMRAY.origin.z
#define SMRAY_IDX SMRAY.index
#define SMRAY_BOUNCES SMRAY.bounces
#define SMRAY_DEPTH SMRAY.depth
#define SMRAY_MASK_X SMRAY.mask.x
#define SMRAY_MASK_Y SMRAY.mask.y
#define SMRAY_MASK_Z SMRAY.mask.z
#define SMRAY_DIR SMRAY.direction
#define SMRAY_ORIG SMRAY.origin
#define SMRAY_MASK SMRAY.mask
#define IDRAY_DIR_X IDRAY.direction.x
#define IDRAY_DIR_Y IDRAY.direction.y
#define IDRAY_DIR_Z IDRAY.direction.z
#define IDRAY_ORIG_X IDRAY.origin.x
#define IDRAY_ORIG_Y IDRAY.origin.y
#define IDRAY_ORIG_Z IDRAY.origin.z
#define IDRAY_IDX IDRAY.index
#define IDRAY_BOUNCES IDRAY.bounces
#define IDRAY_DEPTH IDRAY.depth
#define IDRAY_DIR IDRAY.direction
#define IDRAY_ORIG IDRAY.origin
#define GET_NEXT_RAY {if(!scheduler::grabRay()) return; else continue;}
#define GET_NEXT_IDRAY {if(!scheduler::grabIDRay()) return; else continue;}
#pragma endregion
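// The SMRAY/IDRAY macros address the calling thread's slot in the dynamically sized
// shared-memory ray caches (rayCache/rayid) declared below; GET_NEXT_RAY and
// GET_NEXT_IDRAY refill that slot from the global ray queue via scheduler::grabRay() /
// grabIDRay() and return from the kernel once the queue is exhausted.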
namespace rtx {
enum Refl_t { DIFF, METAL, SPEC, REFR, COAT };
struct uint10_3 {
uint32_t x : 10;
uint32_t y : 10;
uint32_t z : 10;
uint32_t valid : 1;
};
struct Ray {
float3 orig, dir;
};
struct ScheduledRay {
float3 origin;
int32_t index;
float3 direction;
float depth;
float3 mask;
int32_t bounces;
};
struct RayWithIndex {
float3 origin;
float3 direction;
int32_t index;
};
struct RayWithIndexAndDepth {
float3 origin;
float depth;
float3 direction;
uint32_t index : 24;
uint32_t bounces : 4;
uint32_t geomType : 4;
};
struct FluidIntersection {
float3 normal;
float depth = 1e21f;
};
struct RayIntersection {
float depth;
Refl_t materialType;
float3 surfaceColor;
float3 surfaceNormal;
float3 emission;
};
struct rayHit {
float3 position;
float depth;
float3 normal;
bool status;
int32_t voxelIdx;
};
struct rayHitLean {
int3 voxel;
float depth;
};
struct rayHitSuperLean {
float depth;
};
struct AABBHit {
bool hit;
float tmin;
float tmax;
};
struct Pixel {
float3 color;
float3 mask;
};
struct bounceRay {
int32_t pixelIdx;
float3 orig;
float3 dir;
};
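// smallpt-style sphere: intersect() solves |o + t*d - pos|^2 = rad^2 for t and returns
// the nearest intersection distance greater than epsilon, or 0 on a miss. The SM/ID/ID2D
// variants read the ray from the shared-memory caches (SMRAY/IDRAY/IDRAY2D) instead of
// taking a Ray argument.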
struct Sphere {
float rad;
float3 pos, emi, col;
Refl_t refl;
__device__ float intersect(const Ray &r) const {
float3 op = pos - r.orig; // vector from ray origin to sphere center
float t, epsilon = 0.01f;
float b = math::dot(op, r.dir);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float intersectSM() const;
__device__ float intersectID() const;
__device__ float intersectID2D() const;
};
hiprandState_t* cuRandStates;
Pixel* cuImage;
Ray* cuCurrentRays;
RayIntersection* cuRayIntersections;
RayWithIndex* cuScheduledRays;
FluidIntersection* cuFluidIntersections;
RayWithIndexAndDepth* cuBlockedRays;
int32_t* rayCounter;
uint32_t* cRNGSeeds;
__device__ __constant__ SceneInformation cScene;
__device__ __constant__ FluidSystem fluidSystem;
__device__ __constant__ FluidMemory fluidMemory;
__device__ __constant__ Sphere spheres[] = {
//{16, {128.0f, 128, 128}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
{10000, {50.0f, 40.8f, -1060.f}, {0.55f, 0.55f, 0.55f}, {0.175f, 0.175f, 0.175f}, DIFF},
//{100000, {0.0f, 0, -100000.}, {0, 0, 0}, {0.5f, 0.0f, 0.0f}, DIFF},
//{100000, {0.0f, 0, -100000.1}, {0, 0, 0}, {0.3f, 0.3f, 0.3f}, DIFF}
};
__device__ __constant__ int32_t cNumRays;
__device__ __constant__ RayWithIndex* cRays;
__device__ __constant__ RayWithIndexAndDepth* cRaysDepth;
__device__ __constant__ int32_t* cRayCounter;
__device__ __constant__ Pixel* cImage;
__device__ __constant__ hiprandState_t* cRandStates;
__device__ __constant__ FluidIntersection* cFluidIntersections;
__device__ __constant__ uint32_t* cuSeeds;
__device__ __constant__ int32_t cMsaaRate;
#pragma region MORTON_LUT
__device__ __constant__ int32_t morton256_x[256] = {
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
__device__ __constant__ int32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
__device__ __constant__ int32_t morton256_z[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
#pragma endregion
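// morton256_x/y/z spread one 8-bit coordinate component to every third bit of a 24-bit
// Morton code; idx3D_to_mortonLUT() below ORs the three table lookups to interleave x, y and z.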
__shared__ extern RayWithIndexAndDepth rayid[];
__shared__ extern RayIntersection rayi[];
__shared__ extern Ray raySM[];
__shared__ extern ScheduledRay rayCache[];
__shared__ extern float SMCache[];
surface<void, cudaSurfaceType2D> surfaceWriteOut;
__device__ __host__ __inline__ float sgn(float x) {
return x > 0.f ? 1.f : (x < 0.f ? -1.f : 0.f);
}
__device__ auto idx3D_to_mortonLUT(uint10_3 idx) {
return morton256_x[idx.x] | morton256_y[idx.y] | morton256_z[idx.z];
}
__device__ auto idx3D_to_mortonLUT(int3 idx) {
return morton256_x[idx.x] | morton256_y[idx.y] | morton256_z[idx.z];
}
template<typename T, typename U>
__device__ auto position_to_mortonLUT(T p, U& arrays, float factor = 1.f) {
return idx3D_to_mortonLUT(position_to_idx3D_i(p, arrays.min_coord, math::unit_get<1>(arrays.cell_size) * factor));
}
__device__ auto idx3D_to_hash(uint10_3 idx, uint32_t hash_entries) {
return (idx.x * 73856093 + idx.y * 19349663 + idx.z * 83492791) % hash_entries;
}
__device__ float Sphere::intersectSM() const {
float3 op = pos - SMRAY_ORIG; // vector from ray origin to sphere center
float t, epsilon = 0.01f;
float b = math::dot(op, SMRAY_DIR);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float Sphere::intersectID() const {
float3 op = pos - IDRAY_ORIG; // vector from ray origin to sphere center
float t, epsilon = 0.01f;
float b = math::dot(op, IDRAY_DIR);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float Sphere::intersectID2D() const {
float3 op = pos - IDRAY2D.origin; // vector from ray origin to sphere center
float t, epsilon = 0.01f;
float b = math::dot(op, IDRAY2D.direction);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
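// Per-thread xorshift32 RNG: cuSeeds holds one 32-bit state word per thread. The bit
// trick keeps the low 23 bits as the mantissa and forces the exponent field to
// 0x3F800000, producing a float in [1, 2); subtracting 1 maps the result to [0, 1).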
__device__ auto randf() {
auto x = cuSeeds[threadIdx.x + blockIdx.x * blockDim.x];
x ^= x >> 13;
x ^= x << 17;
x ^= x >> 5;
cuSeeds[threadIdx.x + blockIdx.x * blockDim.x] = x;
auto r = (x & 0x007FFFFF) | 0x3F800000;
return *reinterpret_cast<float*>(&r) - 1.f;
}
namespace common {
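// Thin-lens camera: the pixel sample is jittered inside its pixel, projected onto the
// focal plane (focalDistance along the view ray), and the ray origin is moved to a random
// point on the aperture disc, which yields depth of field when apertureRadius > 0.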
__device__ Ray generateCameraRay(int32_t x, int32_t y, hiprandState_t& randState) {
float3 rendercampos = float3{ cScene.m_camera.position.x, cScene.m_camera.position.y, cScene.m_camera.position.z };
int32_t pixelx = x;
int32_t pixely = cScene.height - y - 1;
//float3 finalcol = float3{ 0.0f, 0.0f, 0.0f };
float3 rendercamview =
math::normalize(float3{ cScene.m_camera.view.x, cScene.m_camera.view.y, cScene.m_camera.view.z });
float3 rendercamup = math::normalize(float3{ cScene.m_camera.up.x, cScene.m_camera.up.y, cScene.m_camera.up.z });
float3 horizontalAxis = math::normalize(math::cross(rendercamview, rendercamup));
float3 verticalAxis = math::normalize(math::cross(horizontalAxis, rendercamview));
float3 middle = rendercampos + rendercamview;
float3 horizontal = horizontalAxis * tanf(cScene.m_camera.fov.x * 0.5f * (CUDART_PI_F / 180));
float3 vertical = -verticalAxis * tanf(-cScene.m_camera.fov.y * 0.5f * (CUDART_PI_F / 180));
float jitterValueX = hiprand_uniform(&randState) - 0.5f;
float jitterValueY = hiprand_uniform(&randState) - 0.5f;
float sx = (jitterValueX + pixelx) / (cScene.width - 1);
float sy = (jitterValueY + pixely) / (cScene.height - 1);
// compute pixel on screen
float3 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
float3 pointOnImagePlane =
rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cScene.m_camera.focalDistance);
float3 aperturePoint;
if (cScene.m_camera.apertureRadius > 0.00001f) {
float random1 = hiprand_uniform(&randState);
float random2 = hiprand_uniform(&randState);
float angle = 2.f * CUDART_PI_F * random1;
float distance = cScene.m_camera.apertureRadius * sqrtf(random2);
float apertureX = cos(angle) * distance;
float apertureY = sin(angle) * distance;
aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
}
else
{
aperturePoint = rendercampos;
}
float3 apertureToImagePlane = pointOnImagePlane - aperturePoint;
apertureToImagePlane = math::normalize(apertureToImagePlane);
float3 rayInWorldSpace = math::normalize(apertureToImagePlane);
float3 originInWorldSpace = aperturePoint;
return Ray{ originInWorldSpace, rayInWorldSpace };
}
__global__ void initRNGSeeds(uint32_t* rngStates, int32_t seed) {
int32_t gIdx = threadIdx.x + blockIdx.x * blockDim.x;
rngStates[gIdx] = gIdx ^ seed;
}
__global__ void generatePrimaryRays(int32_t seed, Pixel* image, Ray* rays) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
hiprandState_t randState;
hiprand_init(seed + threadId, 0, 0, &randState);
auto worldRay = generateCameraRay(x, y, randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
rays[i] = Ray{ worldRay.orig, worldRay.dir };
}
__global__ void generateScheduledRays(int32_t seed, Pixel* image, RayWithIndex* rays, Ray* oldRays, int32_t msaa_factor) {
int32_t x = blockIdx.x * blockDim.y + threadIdx.y;
int32_t y = blockIdx.y * blockDim.z + threadIdx.z;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t blockId = blockIdx.x + blockIdx.y * gridDim.x;
int32_t threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
hiprandState_t randState;
hiprand_init(seed + threadId, 0, 0, &randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
auto worldRay = generateCameraRay(x, y, randState);
rays[i * msaa_factor + threadIdx.x] = RayWithIndex{ worldRay.orig, worldRay.dir, i };
}
__global__ void generateBlockedRays(int32_t seed, Pixel* image, RayWithIndexAndDepth* rays, Ray* oldRays, int32_t msaa_factor) {
int32_t x = blockIdx.x * blockDim.y + threadIdx.y;
int32_t y = blockIdx.y * blockDim.z + threadIdx.z;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t blockId = blockIdx.x + blockIdx.y * gridDim.x;
int32_t threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
hiprandState_t randState;
hiprand_init(seed + threadId, 0, 0, &randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
auto worldRay = generateCameraRay(x, y, randState);
rays[i * msaa_factor + threadIdx.x] = RayWithIndexAndDepth{ worldRay.orig, FLT_MAX, worldRay.dir, (uint32_t) i, 0u, 0u};
}
__global__ void toneMap(int32_t frameNumber, float3* accumBuffer, Pixel* image, float rate) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
accumBuffer[i] += image[i].color / rate;
float3 tempcol = (accumBuffer[i] / frameNumber);
float3 colour = float3{ math::clamp(tempcol.x, 0.0f, 1.0f), math::clamp(tempcol.y, 0.0f, 1.0f),
math::clamp(tempcol.z, 0.0f, 1.0f) };
float4 out{ (powf(colour.x, 1 / 2.2f)), (powf(colour.y, 1 / 2.2f)), (powf(colour.z, 1 / 2.2f)), 1.f };
//out = float4{ colour.x, colour.y, colour.z, 1.f };
surf2Dwrite(out, surfaceWriteOut, x * sizeof(float4), y, hipBoundaryModeClamp);
}
}
namespace aabb {
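// All AABB tests in this namespace use the slab method: per axis, compute the parametric
// entry/exit distances (choosing the near/far face from the ray direction sign), intersect
// the three intervals, and report a hit when the resulting interval is non-empty and ends
// in front of the ray origin (tmax > 0).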
__device__ __host__ auto rayIntersectAABB(Ray worldRay, float3 aabb_min, float3 aabb_max) {
//float tmin, tmax, tymin, tymax, tzmin, tzmax;
//float invdirx = 1.f / worldRay.dir.x;
//float invdiry = 1.f / worldRay.dir.y;
//float invdirz = 1.f / worldRay.dir.z;
float tmin = ((worldRay.dir.x < 0.f ? aabb_max.x : aabb_min.x) - worldRay.orig.x) / worldRay.dir.x;
float tmax = ((worldRay.dir.x < 0.f ? aabb_min.x : aabb_max.x) - worldRay.orig.x) / worldRay.dir.x;
float tymin = ((worldRay.dir.y < 0.f ? aabb_max.y : aabb_min.y) - worldRay.orig.y) / worldRay.dir.y;
float tymax = ((worldRay.dir.y < 0.f ? aabb_min.y : aabb_max.y) - worldRay.orig.y) / worldRay.dir.y;
#ifndef __CUDA_ARCH__
std::cout << worldRay.orig << worldRay.dir << aabb_min << aabb_max << std::endl;
std::cout << tmin << " " << tmax << " " << tymin << " " << tymax << std::endl;
#endif
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((worldRay.dir.z < 0.f ? aabb_max.z : aabb_min.z) - worldRay.orig.z) / worldRay.dir.z;
float tzmax = ((worldRay.dir.z < 0.f ? aabb_min.z : aabb_max.z) - worldRay.orig.z) / worldRay.dir.z;
#ifndef __CUDA_ARCH__
std::cout << tzmin << " " << tzmax << std::endl;
#endif
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABB(Ray worldRay) {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float invdirx = 1.f / worldRay.dir.x;
float invdiry = 1.f / worldRay.dir.y;
float invdirz = 1.f / worldRay.dir.z;
tmin = (fluidSystem.bounds[invdirx < 0.f].x - worldRay.orig.x) * invdirx;
tmax = (fluidSystem.bounds[1 - (invdirx < 0.f)].x - worldRay.orig.x) * invdirx;
tymin = (fluidSystem.bounds[invdiry < 0.f].y - worldRay.orig.y) * invdiry;
tymax = (fluidSystem.bounds[1 - (invdiry < 0.f)].y - worldRay.orig.y) * invdiry;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (fluidSystem.bounds[invdirz < 0.f].z - worldRay.orig.z) * invdirz;
tzmax = (fluidSystem.bounds[1 - (invdirz < 0.f)].z - worldRay.orig.z) * invdirz;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABBSMRAY() {
const auto aabb_min = fluidSystem.bounds[0];
const auto aabb_max = fluidSystem.bounds[1];
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABBIDRAY() {
const auto aabb_min = fluidSystem.bounds[0];
const auto aabb_max = fluidSystem.bounds[1];
float tmin = ((IDRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tmax = ((IDRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tymin = ((IDRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
float tymax = ((IDRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((IDRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
float tzmax = ((IDRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBSM(float3 aabb_min, float3 aabb_max) {
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBID(float3 aabb_min, float3 aabb_max) {
float tmin = ((IDRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tmax = ((IDRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tymin = ((IDRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
float tymax = ((IDRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((IDRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
float tzmax = ((IDRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBSMU(float3 aabb_min, float len) {
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_min.x + len : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_min.x + len) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_min.y + len : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_min.y + len) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_min.z + len : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_min.z + len) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
}
namespace traversal {
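// lookup_cell(): occupancy query against the compact hash grid. The hashed cell entry
// either answers immediately (LIST_ALWAYS_TRUE / LIST_ALWAYS_FALSE), stores a single
// compacted cell hash (LIST_COMPACT), or references a span of cellSpan entries to scan
// (LIST_ITERATE). Returns 1 for an occupied cell, INT_MAX for an empty one and -1
// outside the grid bounds.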
__device__ auto lookup_cell(const int3 &idx) {
if (idx.x >= fluidMemory.grid_size.x || idx.y >= fluidMemory.grid_size.y || idx.z >= fluidMemory.grid_size.z)
return -1;
if (idx.x < 0 || idx.y < 0 || idx.z < 0)
return -1;
auto morton = idx3D_to_hash(idx, fluidMemory.hash_entries);
auto s = fluidMemory.hashMap[idx3D_to_morton(idx) % fluidMemory.hash_entries];
if (s.hash.kind == LIST_ALWAYS_FALSE)
return INT_MAX;
if (s.hash.kind == LIST_ALWAYS_TRUE)
return 1;
if (s.hash.kind == LIST_COMPACT)
if (morton == s.cell.hash)
return 1;
if(s.hash.kind == LIST_ITERATE)
for (int32_t ii = s.hash.beginning; ii < s.hash.beginning + s.hash.length; ++ii)
if (fluidMemory.cellSpan[ii].cell.hash == morton)
return 1;
return INT_MAX;
}
//__device__ auto lookup_cell(const uint10_3& idx) {
// if (idx.x >= fluidMemory.grid_size.x || idx.y >= fluidMemory.grid_size.y || idx.z >= fluidMemory.grid_size.z)
// return INT_MAX;
// //auto morton = idx3D_to_mortonLUT(idx);
// auto s = fluidMemory.hashMap[idx3D_to_hash(idx, fluidMemory.hash_entries)];
// if (s.compacted == 1 && s.beginning != UINT31_MAX) {
// auto cs = cell_span{ (int32_t)s.beginning, s.length };
// auto jj = cs.beginning;
// if (position_to_mortonLUT(fluidMemory.position[jj], fluidMemory, 1.f) == idx3D_to_mortonLUT(idx)) {
// return cs.beginning;
// }
// }
// else {
// auto ii = (int32_t)s.beginning;
// if (s.beginning == UINT31_MAX)
// return INT_MAX;
// int32_t l = idx3D_to_mortonLUT(idx);
// for (; ii < s.beginning + s.length;) {
// auto cs = fluidMemory.cellSpan[ii];
// ++ii;
// auto jj = cs.beginning;
// if (position_to_mortonLUT(fluidMemory.position[jj], fluidMemory, 1.f) == l)
// return ii;
// }
// }
// return INT_MAX;
//}
//__device__ auto lookupVoxelCenter(const float3& voxelCenter) {
// if (voxelCenter.x < fluidSystem.bounds[0].x || voxelCenter.y < fluidSystem.bounds[0].y || voxelCenter.z < fluidSystem.bounds[0].z)
// return INT_MAX;
// if (voxelCenter.x > fluidSystem.bounds[1].x || voxelCenter.y > fluidSystem.bounds[1].y || voxelCenter.z > fluidSystem.bounds[1].z)
// return INT_MAX;
// auto s = fluidMemory.hashMap[position_to_hash(voxelCenter, fluidMemory)];
// if (s.compacted == 1 && s.beginning != UINT31_MAX) {
// auto d = math::max_elem(math::abs((math::castTo<float3>(fluidMemory.position[s.beginning]) - voxelCenter) / fluidMemory.cell_size.x * 0.5f));
// if (d < 1.f)
// return (int32_t)s.beginning;
// }
// else {
// if (s.beginning == UINT31_MAX)
// return INT_MAX;
// for (int32_t ii = s.beginning; ii < s.beginning + s.length; ++ii) {
// auto jj = fluidMemory.cellSpan[ii].beginning;
// auto d = math::max_elem(math::abs((math::castTo<float3>(fluidMemory.position[jj]) - voxelCenter) / fluidMemory.cell_size.x * 0.5f));
// if (d < 1.f)
// return jj;
// }
// }
// return INT_MAX;
//}
__device__ __host__ float mod(float a, float N) {
return a - N * floorf(a / N);
}
__device__ __host__ float intBound2_s(float s, float ds) {
if (s == floorf(s) && ds < 0.f)
return 0.f;
if (ds < 0.f)
return intBound2_s(-s, -ds);
float s2 = mod(s, 1.f);
return (1.f - s2) / ds;
}
__device__ __host__ float3 intBound(const float3& s, const float3& ds) {
return float3{
intBound2_s(s.x, ds.x),
intBound2_s(s.y, ds.y),
intBound2_s(s.z, ds.z)
};
}
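// iterateVoxels(): 3D-DDA traversal (Amanatides & Woo). The entry point on the fluid AABB
// is converted to cell coordinates, intBound() gives the parametric distance to the first
// boundary crossing per axis, and tDelta = 1/|dir| is the spacing between crossings. Each
// step advances along the axis with the smallest tMax, fn(voxel) is called for occupied
// cells, and traversal ends when fn returns true or tMax exceeds the segment length inside
// the AABB (measured in cell units).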
template<typename Func> __device__ void iterateVoxels(Func&& fn, const float3& start, const float3& dir, float aabb_min, const float& aabb_max) {
aabb_min = math::max( 0.f, aabb_min);
auto position = start + dir * aabb_min;
auto offset = (position - fluidMemory.min_coord) / fluidMemory.cell_size.x;
int3 voxelPosition = math::castTo<int3>(math::floorf(offset));
float3 tDelta = float3{ sgn(dir.x) / dir.x,sgn(dir.y) / dir.y,sgn(dir.z)/dir.z };
float3 tMax = intBound(offset, dir);
float limit = (aabb_max - aabb_min) / fluidMemory.cell_size.x + 1.f;
while (true) {
auto cell_idx = lookup_cell(voxelPosition);
if (cell_idx == 1) {
if (fn(voxelPosition))
return;
}
if (tMax.x < tMax.y) {
if (tMax.x < tMax.z) {
if (tMax.x > limit) return;
voxelPosition.x += sgn(dir.x);
tMax.x += tDelta.x;
}
else {
if (tMax.z > limit) return;
voxelPosition.z += sgn(dir.z);
tMax.z += tDelta.z;
}
}
else {
if (tMax.y < tMax.z) {
if (tMax.y > limit) return;
voxelPosition.y += sgn(dir.y);
tMax.y += tDelta.y;
}
else {
if (tMax.z > limit) return;
voxelPosition.z += sgn(dir.z);
tMax.z += tDelta.z;
}
}
}
}
//template<typename Func> __device__ void iterateVoxelsCompact(Func&& fn, const float3& start, const float3& dir, const float& aabb_min, const float& aabb_max) {
// //auto intBound = [](auto s, auto ds) {
// // auto sIsInteger = (roundf(s) == s);
// // if (ds < 0 && sIsInteger)
// // return 0.f;
// // return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
// //};
// auto vP = position_to_idx3D(start, fluidMemory.min_coord, math::unit_get<1>(fluidMemory.cell_size));
// uint10_3 voxelPosition{ vP.x, vP.y, vP.z, 1 };
// auto offset = (start - fluidMemory.min_coord) / fluidMemory.cell_size;
// //float3 tMax{ intBound(offset.x, dir.x), intBound(offset.y, dir.y), intBound(offset.z, dir.z) };
// float3 tMax = intBound(offset, dir);
// //float3 tDelta = math::castTo<float3>(uchar3{ sgn(dir.x), sgn(dir.y), sgn(dir.z) }) / dir;
// while (true) {
// auto cell_idx = lookup_cell(voxelPosition);
// if (cell_idx != UINT31_MAX) {
// if (fn(voxelPosition))
// return;
// }
// if (tMax.x < tMax.y) {
// if (tMax.x < tMax.z) {
// if (tMax.x > aabb_max - aabb_min) return;
// voxelPosition.x += sgn(dir.x);
// tMax.x += sgn(dir.x) / dir.x;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPosition.z += sgn(dir.z);
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// else {
// if (tMax.y < tMax.z) {
// if (tMax.y > aabb_max - aabb_min) return;
// voxelPosition.y += sgn(dir.y);
// tMax.y += sgn(dir.y) / dir.y;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPosition.z += sgn(dir.z);
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// }
//}
//template<typename Func> __device__ void iterateVoxelCenters(Func&& fn, const float3& start, const float3& dir, const float& aabb_min, const float& aabb_max) {
// //auto intBound = [](auto s, auto ds) {
// // auto sIsInteger = (roundf(s) == s);
// // if (ds < 0 && sIsInteger)
// // return 0.f;
// // return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
// //};
// int3 voxelPosition = position_to_idx3D_i(start, fluidMemory.min_coord, fluidMemory.cell_size.x);
// float3 voxelPos = fluidMemory.min_coord + math::castTo<float3>(voxelPosition) * fluidMemory.cell_size.x + 0.5f * fluidMemory.cell_size.x;
// auto offset = (start - fluidMemory.min_coord) / fluidMemory.cell_size;
// //float3 tMax{ intBound(offset.x, dir.x), intBound(offset.y, dir.y), intBound(offset.z, dir.z) };
// float3 tMax = intBound(offset, dir);
// //float3 tDelta = math::castTo<float3>(uchar3{ sgn(dir.x), sgn(dir.y), sgn(dir.z) }) / dir;
// while (true) {
// auto cell_idx = lookupVoxelCenter(voxelPos);
// if (cell_idx != UINT31_MAX) {
// if (fn(cell_idx))
// return;
// }
// if (tMax.x < tMax.y) {
// if (tMax.x < tMax.z) {
// if (tMax.x > aabb_max - aabb_min) return;
// voxelPos.x += sgn(dir.x) * fluidMemory.cell_size.x;
// tMax.x += sgn(dir.x) / dir.x;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPos.z += sgn(dir.z) * fluidMemory.cell_size.x;
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// else {
// if (tMax.y < tMax.z) {
// if (tMax.y > aabb_max - aabb_min) return;
// voxelPos.y += sgn(dir.y) * fluidMemory.cell_size.x;
// tMax.y += sgn(dir.y) / dir.y;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPos.z += sgn(dir.z) * fluidMemory.cell_size.x;
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// }
//}
template<typename Func> __device__ void iterateVoxelsSMRAY(Func&& fn, const float& aabb_min, const float& aabb_max) {
auto intBound = [](auto s, auto ds) {
auto sIsInteger = (roundf(s) == s);
if (ds < 0 && sIsInteger)
return 0.f;
return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
};
int3 voxelPosition = position_to_idx3D_i(SMRAY.origin, fluidMemory.min_coord, math::unit_get<1>(fluidMemory.cell_size));
char4 step{ static_cast<char>(sgn(SMRAY.direction.x)), static_cast<char>(sgn(SMRAY.direction.y)), static_cast<char>(sgn(SMRAY.direction.z)), 1 };
auto offset = (SMRAY.origin - fluidMemory.min_coord) / fluidMemory.cell_size;
float3 tMax{ intBound(offset.x, SMRAY.direction.x), intBound(offset.y, SMRAY.direction.y), intBound(offset.z, SMRAY.direction.z) };
float3 tDelta = math::castTo<float3>(step) / SMRAY.direction;
while (true) {
auto cell_idx = lookup_cell(voxelPosition);
if (cell_idx != UINT31_MAX) {
if (fn(voxelPosition))
return;
}
if (tMax.x < tMax.y) {
if (tMax.x < tMax.z) {
if (tMax.x > aabb_max - aabb_min) return;
voxelPosition.x += step.x;
tMax.x += tDelta.x;
}
else {
if (tMax.z > aabb_max - aabb_min) return;
voxelPosition.z += step.z;
tMax.z += tDelta.z;
}
}
else {
if (tMax.y < tMax.z) {
if (tMax.y > aabb_max - aabb_min) return;
voxelPosition.y += step.y;
tMax.y += tDelta.y;
step.w = 2;
}
else {
if (tMax.z > aabb_max - aabb_min) return;
voxelPosition.z += step.z;
tMax.z += tDelta.z;
step.w = 3;
}
}
}
}
}
namespace bvh {
__device__ auto rayIntersectBVHNode(const CFBVHNode& node, Ray worldRay) {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float invdirx = 1.f / worldRay.dir.x;
float invdiry = 1.f / worldRay.dir.y;
float invdirz = 1.f / worldRay.dir.z;
float3 max = {
invdirx < 0.f ? node.min.x : node.max.x,
invdiry < 0.f ? node.min.y : node.max.y,
invdirz < 0.f ? node.min.z : node.max.z
};
float3 min = {
invdirx < 0.f ? node.max.x : node.min.x,
invdiry < 0.f ? node.max.y : node.min.y,
invdirz < 0.f ? node.max.z : node.min.z
};
tmin = (min.x - worldRay.orig.x) * invdirx;
tmax = (max.x - worldRay.orig.x) * invdirx;
tymin = (min.y - worldRay.orig.y) * invdiry;
tymax = (max.y - worldRay.orig.y) * invdiry;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (min.z - worldRay.orig.z) * invdirz;
tzmax = (max.z - worldRay.orig.z) * invdirz;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
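// Iterative BVH traversal with an explicit stack (BVH_STACK_SIZE entries): inner nodes push
// both children when their bounds are hit closer than the best leaf hit so far; leaves
// (count flagged with 0x80000000) update the closest hit, whose entry/exit distances are
// finally converted into a hit position and box-face normal.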
__device__ rayHit rayIntersectFluidBVH(Ray worldRay, int32_t oldIdx = -1) {
//auto aabb = rayIntersectFluidAABB(worldRay);
//if (aabb.hit == false)
// return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
auto nodeNormal = [](auto node, auto point) {
constexpr auto epsilon = 1e-4f;
auto c = (node.min + node.max) * 0.5f;
auto prel = point - c;
auto d = math::abs((node.min - node.max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
return float3{ (float)n.x, (float)n.y, (float)n.z };
};
auto getHitInformation = [=](auto node, auto tmin, auto tmax) {
float3 aabb_min = worldRay.orig + tmin * worldRay.dir;
float3 aabb_max = worldRay.orig + tmax * worldRay.dir;
// DEBUG render for AABB
if (tmin >= 0.f)
return rayHit{ aabb_min, tmin, math::abs(nodeNormal(node, aabb_min)), true };
else
return rayHit{ aabb_max, tmax, math::abs(nodeNormal(node, aabb_max)), true };
};
constexpr auto BVH_STACK_SIZE = 32;
int32_t hitVoxelIdx = -1;
float hitTmin = FLT_MAX;
float hitTmax = -FLT_MAX;
int32_t bvhStack[BVH_STACK_SIZE];
int32_t bvhStackIdx = 0;
bvhStack[bvhStackIdx++] = 0;
while (bvhStackIdx) {
int32_t boxIdx = bvhStack[bvhStackIdx - 1];
bvhStackIdx--;
auto node = fluidSystem.fluidBVH[boxIdx];
if (!(node.u.leaf.count & 0x80000000)) { // INNER NODE
auto intersection = rayIntersectBVHNode(node, worldRay);
if (intersection.hit && intersection.tmin < hitTmin) {
bvhStack[bvhStackIdx++] = node.u.inner.idxLeft;
bvhStack[bvhStackIdx++] = node.u.inner.idxRight;
if (bvhStackIdx > BVH_STACK_SIZE) {
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
}
//return getHitInformation(node, intersection.tmin, intersection.tmax);
}
}
else {
auto intersection = rayIntersectBVHNode(node, worldRay);
if (intersection.hit && intersection.tmin < hitTmin) {
hitVoxelIdx = boxIdx;
hitTmin = intersection.tmin;
hitTmax = intersection.tmax;
}
}
}
if (hitVoxelIdx != -1) {
auto hitNode = fluidSystem.fluidBVH[hitVoxelIdx];
return getHitInformation(hitNode, hitTmin, hitTmax);
}
else
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
}
}
namespace fluid {
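// Fluid intersection: clip the ray against the fluid AABB, then DDA through the occupied
// grid cells; the first occupied cell is intersected as a small AABB and the face normal is
// derived from the dominant component of the hit point's offset from the cell center.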
__device__ rayHit rayIntersectFluid(Ray& worldRay, int32_t oldIdx = -1) {
//#define DEBUG_AABB
auto aabb_center = (fluidSystem.bounds[1] - fluidSystem.bounds[0]) / 2.f;
auto aabb_normal = [](auto v) {
constexpr auto epsilon = 1e-5f;
auto c = (fluidSystem.bounds[0] + fluidSystem.bounds[1]) * 0.5f;
auto prel = v - c;
auto d = math::abs((fluidSystem.bounds[0] - fluidSystem.bounds[1]) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
return float3{ (float)n.x, (float)n.y, (float)n.z };
};
worldRay.dir = math::normalize(worldRay.dir);
auto aabb = aabb::rayIntersectFluidAABB(worldRay);
if (aabb.hit == true) {
#ifdef DEBUG_AABB
float3 aabb_min = worldRay.orig + aabb.tmin * worldRay.dir;
float3 aabb_max = worldRay.orig + aabb.tmax * worldRay.dir;
// DEBUG render for AABB
if (aabb.tmin >= 0.f)
return rayHit{ aabb_min, aabb.tmin, math::abs(aabb_normal(aabb_min)), true };
else
return rayHit{ aabb_max, aabb.tmax, math::abs(aabb_normal(aabb_max)), true };
#endif
float t = FLT_MAX;
char3 nc{ 1,0,0 };
//float3 normal;
traversal::iterateVoxels([&](int3 voxel) {
constexpr auto epsilon = 1e-1f;
float3 min = fluidMemory.min_coord + math::castTo<float3>(voxel) * fluidMemory.cell_size;
float3 max = min + fluidMemory.cell_size;
auto rH = aabb::rayIntersectAABB(worldRay, min, max);
auto hitPosition = worldRay.orig + rH.tmin * worldRay.dir;
auto c = (min + max) * 0.5f;
auto prel = hitPosition - c;
auto d = math::abs((min - max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
nc = char3{ static_cast<char>(n.x), static_cast<char>(n.y), static_cast<char>(n.z) };
//normal = math::abs(prel / d);
//normal = math::abs(float3{
// math::abs(prel.x) > math::abs(prel.y) && math::abs(prel.x) > math::abs(prel.z) ? prel.x : 0.f,
// math::abs(prel.y) > math::abs(prel.x) && math::abs(prel.y) > math::abs(prel.z) ? prel.y : 0.f,
// math::abs(prel.z) > math::abs(prel.y) && math::abs(prel.z) > math::abs(prel.x) ? prel.z : 0.f
//});
/* normal = math::castTo<float3>(voxel);
normal.x *= 0.001f;
normal.y *= 0.02f;
normal.z *= 0.02f;*/
t = rH.tmin;
return true;
}, worldRay.orig, worldRay.dir, aabb.tmin, aabb.tmax);
float3 hitPosition = worldRay.orig + t * worldRay.dir;
float3 normal = float3{ (float)nc.x, (float)nc.y, (float)nc.z };
if (nc.x == nc.y && nc.y == nc.z && nc.z == 0)
normal = float3{ 1.f,0.f,0.f };
if (t < 1e19f)
return rayHit{ hitPosition, math::distance3(worldRay.orig, hitPosition), normal, true };
}
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false };
}
__device__ rayHitLean rayIntersectFluidLean(Ray& worldRay, int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABB(worldRay);
if (aabb.hit == true) {
float t = FLT_MAX;
int3 hitVoxel{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
hitVoxel = voxel;
t = 0.f;
return true;
}, worldRay.orig, worldRay.dir, math::max(aabb.tmin, 0.f), aabb.tmax);
if (t < 1e19f)
return rayHitLean{ hitVoxel, 0.f };
}
return rayHitLean{ {INT_MAX, INT_MAX, INT_MAX}, FLT_MAX };
}
__device__ rayHitLean rayIntersectFluidLeanSM(int32_t oldIdx = -1) {
int32_t ii = threadIdx.x + blockDim.x * threadIdx.y;
auto aabb = aabb::rayIntersectFluidAABB(raySM[ii]);
if (aabb.hit == true) {
float t = FLT_MAX;
int3 hitVoxel{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
hitVoxel = voxel;
t = 0.f;
return true;
}, raySM[ii].orig, raySM[ii].dir, math::max(aabb.tmin, 0.f), aabb.tmax);
if (t < 1e19f)
return rayHitLean{ hitVoxel, 0.f };
}
return rayHitLean{ {INT_MAX, INT_MAX, INT_MAX}, FLT_MAX };
}
__device__ int3 rayIntersectFluidLeanSMRAY(int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABBSMRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
if (aabb.hit == true) {
//float t = FLT_MAX;
int3 idx{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
idx = voxel;
//t = 0.f;
return true;
}, SMRAY_ORIG, SMRAY_DIR, math::max(aabb.tmin, 0.f), aabb.tmax);
if (idx.x != INT_MAX)
return idx;
}
return int3{ INT_MAX, INT_MAX, INT_MAX };
}
__device__ int3 rayIntersectFluidLeanIDRAY(int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABBIDRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
if (aabb.hit == true) {
//float t = FLT_MAX;
int3 idx{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
idx = voxel;
//t = 0.f;
return true;
}, IDRAY_ORIG, IDRAY_DIR, math::max(aabb.tmin, 0.f), aabb.tmax);
if (idx.x != INT_MAX)
return idx;
}
return int3{ INT_MAX, INT_MAX, INT_MAX };
}
//__device__ uint10_3 rayIntersectFluidLeanSMRAYSuper(int32_t oldIdx = -1) {
// auto aabb = aabb::rayIntersectFluidAABBSMRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
// if (aabb.hit == true) {
// uint10_3 t{ 0, 0, 0, 0 };
// traversal::iterateVoxelsCompact([&](uint10_3 particleIdx) {
// t = particleIdx;
// return true;
// }, SMRAY_ORIG, SMRAY_DIR, 0.f, aabb.tmax);
// if (t.valid == true)
// return t;
// }
// return uint10_3{ 0, 0, 0, 0 };
//}
}
namespace scheduler {
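// Persistent-threads work distribution: cRayCounter is a global atomic cursor into the ray
// queue (cRays / cRaysDepth). grabRay()/grabIDRay() fetch the next index, overwrite the
// calling thread's shared-memory slot (SMRAY / IDRAY) and return false once the cursor
// passes cNumRays.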
__device__ void update_ray(int32_t counter) {
SMRAY = ScheduledRay{ cRays[counter].origin, cRays[counter].index, cRays[counter].direction, 1e21f, float3{1.f,1.f,1.f}, 0 };
}
__device__ void updateIDRay(int32_t counter) {
IDRAY = cRaysDepth[counter];// RayWithIndexAndDepth{ cRaysDepth[counter].origin, cRaysDepth[counter].depth, cRaysDepth[counter].direction, cRaysDepth[counter].index, cRaysDepth[counter].bounces, cRaysDepth[counter].geomType };
}
__device__ bool grabRay() {
cuda_atomic<int32_t> atomicCounter(cRayCounter);
int32_t counter = atomicCounter.add(1);
if (counter >= cNumRays) return false;
update_ray(counter);
return true;
}
__device__ bool grabIDRay() {
cuda_atomic<int32_t> atomicCounter(cRayCounter);
int32_t counter = atomicCounter.add(1);
if (counter >= cNumRays) return false;
updateIDRay(counter);
return true;
}
}
namespace render {
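// rayScheduler() is a persistent-threads path tracer: each thread starts with the ray at its
// global thread id, traces it to termination, then pulls new work via GET_NEXT_RAY. The ray
// lives in dynamic shared memory (rayCache), so the launch must supply at least
// blockDim.x * sizeof(ScheduledRay) bytes of it. Host-side sketch (the launch site is not
// part of this file, so block/thread counts here are assumptions):
// hipLaunchKernelGGL(rayScheduler, dim3(blocks), dim3(threadsPerBlock), threadsPerBlock * sizeof(ScheduledRay), 0);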
__global__ void rayScheduler() {
//const int32_t tIdx = threadIdx.x;
//const int32_t gIdx = threadIdx.x + blockIdx.x * blockDim.x;
__syncwarp();
if (threadIdx.x + blockIdx.x * blockDim.x >= cNumRays) return;
__syncwarp();
scheduler::update_ray(threadIdx.x + blockIdx.x * blockDim.x);
__syncwarp();
while (1) {
__syncwarp();
auto fluidHit = fluid::rayIntersectFluidLeanSMRAY();
__syncwarp();
float3 normal, color;
if (fluidHit.x != INT_MAX) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
//auto hitPosition = SMRAY_ORIG + fluidHit * SMRAY_DIR;
//int3 vIdx = position_to_idx3D_i(fluidMemory.position[fluidHit], fluidMemory.min_coord, cs);
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit) * cs;
auto rH = aabb::rayIntersectAABBSM(min, min + cs);
auto t = rH.tmin;
if (t < SMRAY_DEPTH) {
SMRAY_DEPTH = t;
auto hitPosition = SMRAY_ORIG + rH.tmin*1.01f * SMRAY_DIR;
auto c = min + cs * 0.5f;
auto prel = hitPosition - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
auto n = math::castTo<float3>(nc);
//float3 n{ 1.f,0.f,0.f };
auto nl = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
normal = nl;
color = f;
}
}
__syncwarp();
int32_t idx = -1;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersectSM();
if (d && d < SMRAY_DEPTH) {
SMRAY_DEPTH = d;
auto x = SMRAY_ORIG + SMRAY_DIR * SMRAY_DEPTH;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
normal = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
color = sphere.col;
idx = sphere_id;
}
}
if (idx != -1 && math::length3(spheres[idx].emi) > 0.1f) {
cuda_atomic<float3> atomicColor(&cImage[SMRAY_IDX].color);
atomicColor.x += SMRAY_MASK_X * spheres[idx].emi.x;
atomicColor.y += SMRAY_MASK_Y * spheres[idx].emi.y;
atomicColor.z += SMRAY_MASK_Z * spheres[idx].emi.z;
//GET_NEXT_RAY;
}
__syncwarp();
if (SMRAY_DEPTH > 1e19f) GET_NEXT_RAY;
//cuda_atomic<float3> atomicColor(&cImage[SMRAY_IDX].color);
//atomicColor.x += randf();
//atomicColor.y += randf();
//atomicColor.z += randf();
//GET_NEXT_RAY;
SMRAY_MASK *= color;
auto position = SMRAY_ORIG + SMRAY_DIR * SMRAY_DEPTH;
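// Diffuse bounce: cosine-weighted hemisphere sampling about the shading normal. (w,u,v) is
// an orthonormal basis around the normal, phi a uniform azimuth, and r2s = sqrt(r2) weights
// the polar angle so sampled directions are distributed proportionally to cos(theta), as in smallpt.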
//float phi = 2 * CUDART_PI_F * hiprand_uniform(cRandStates + threadIdx.x + blockIdx.x * blockDim.x);
float phi = 2 * CUDART_PI_F * randf();
//float r2 = hiprand_uniform(cRandStates + threadIdx.x + blockIdx.x * blockDim.x);
float r2 = randf();
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
//SMRAY_MASK_X *= color.x;
//SMRAY_MASK_Y *= color.y;
//SMRAY_MASK_Z *= color.z;
SMRAY_BOUNCES++;
if (SMRAY_BOUNCES == 5)
GET_NEXT_RAY;
SMRAY_ORIG = position + w * 0.01f;
//SMRAY_ORIG_X = position.x + w.x * 0.01f;
//SMRAY_ORIG_Y = position.y + w.y * 0.01f;
//SMRAY_ORIG_Z = position.z + w.z * 0.01f;
SMRAY_DIR = dw;
//SMRAY_DIR_X = dw.x;
//SMRAY_DIR_Y = dw.y;
//SMRAY_DIR_Z = dw.z;
SMRAY_DEPTH = 1e21f;
}
}
__global__ void IDrayScheduler() {
if (threadIdx.x + blockIdx.x * blockDim.x >= cNumRays) return;
cuda_atomic<int32_t> atomicCounter(cRayCounter);
auto counter = threadIdx.x + blockIdx.x * blockDim.x;
IDRAY = cRaysDepth[counter];
while (1) {
auto fluidHit = fluid::rayIntersectFluidLeanIDRAY();
if (fluidHit.x != INT_MAX) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit) * cs;
auto rH = aabb::rayIntersectAABBID(min, min + cs);
auto t = rH.tmin;
if (t < IDRAY_DEPTH) {
IDRAY_DEPTH = t;
auto hitPosition = IDRAY_ORIG + rH.tmin * IDRAY_DIR;
auto c = min + cs * 0.5f;
auto prel = hitPosition - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
auto n = math::castTo<float3>(nc);
//float3 n{ 1.f,0.f,0.f };
auto nl = math::dot(n, IDRAY_DIR) < 0 ? n : n * -1;
//auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
cFluidIntersections[counter].depth = rH.tmin;
cFluidIntersections[counter].normal = n;
//cRaysDepth[counter].depth = rH.tmin;
//cRaysDepth[counter].geomType = MAX_VAL_04BIT;
}
else {
cFluidIntersections[counter].depth = FLT_MAX;
}
}
else {
cFluidIntersections[counter].depth = FLT_MAX;
}
counter = atomicCounter.add(1);
if (counter >= cNumRays) return;
IDRAY = cRaysDepth[counter];
}
}
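// One thread per pixel with the ray staged in shared memory (raySM): finds the nearer of fluid
// surface and spheres, performs a single cosine-weighted diffuse bounce and writes ray and pixel back.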
__global__ void intersectAndShadeRaysSM(Ray *rays, Pixel *image, int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t ii = threadIdx.x + blockDim.x * threadIdx.y;
float scene_t = 1e20f;
raySM[ii] = rays[i];
auto fluidHit = fluid::rayIntersectFluidLeanSM();
float3 normal, color, emission;
if (fluidHit.depth < FLT_MAX) {
constexpr auto epsilon = 1e-1f;
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit.voxel) * fluidMemory.cell_size;
float3 max = fluidSystem.bounds[0] + (math::castTo<float3>(fluidHit.voxel) + 1.f) * fluidMemory.cell_size;
auto rH = aabb::rayIntersectAABB(raySM[ii], min, max);
auto hitPosition = raySM[ii].orig + rH.tmin * raySM[ii].dir;
auto c = (min + max) * 0.5f;
auto prel = hitPosition - c;
auto d = math::abs((min - max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(n.x), static_cast<char>(n.y), static_cast<char>(n.z) };
auto t = rH.tmin;
if (t < scene_t) {
scene_t = t;
auto n = math::castTo<float3>(nc);
auto nl = math::dot(n, raySM[ii].dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
// rayi[ii] = RayIntersection{ fluidHit.depth, DIFF, f, nl, emit };
normal = nl;
emission = emit;
color = f;
}
}
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(raySM[ii]);
if (d && d < scene_t) {
scene_t = d;
auto x = raySM[ii].orig + raySM[ii].dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
normal = math::dot(n, raySM[ii].dir) < 0 ? n : n * -1;
color = sphere.col;
emission = sphere.emi;
// rayi[ii] = RayIntersection{ d, sphere.refl, sphere.col, nl, sphere.emi };
}
}
if (scene_t > 1e19f)
return;
// rayi[ii] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
hiprandState_t randState;
hiprand_init(seed + i, 0, 0, &randState);
auto pixel = image[i];
// auto worldRay = rays[i];
auto position = raySM[ii].orig + raySM[ii].dir * scene_t;
pixel.color += (pixel.mask * emission);
float phi = 2 * CUDART_PI_F * hiprand_uniform(&randState);
float r2 = hiprand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
pixel.mask *= color;
raySM[ii].orig = position + w * 0.01f;
raySM[ii].dir = dw;
rays[i] = raySM[ii];
image[i] = pixel;
//#undef i
//#undef ii
}
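// Per-pixel intersection kernel: tests the analytic spheres and the fluid surface and stores the
// closest hit as a RayIntersection; misses are flagged with a depth of 1e20.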
__global__ void intersectRays(Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
auto worldRay = rays[i];
float scene_t = 1e20f;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(worldRay);
if (d && d < scene_t) {
scene_t = d;
auto x = worldRay.orig + worldRay.dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
intersections[i] = RayIntersection{ d, sphere.refl, sphere.col, n, sphere.emi };
}
}
auto fluidHit = fluid::rayIntersectFluid(worldRay);
if (fluidHit.status && fluidHit.depth < scene_t) {
scene_t = fluidHit.depth;
auto n = fluidHit.normal;
//return n;
//auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
intersections[i] = RayIntersection{ fluidHit.depth, DIFF, f, n, emit };
}
auto dw = worldRay.dir;
if (dw.x != dw.x || dw.y != dw.y || dw.z != dw.z || (dw.x == 0.f && dw.y == 0.f && dw.z == 0.f)) {
//printf("x[%f %f %f] : [%f %f %f]\n", worldRay.orig.x, worldRay.orig.y, worldRay.orig.z, worldRay.dir.x, worldRay.dir.y, worldRay.dir.z);
}
if (scene_t > 1e19f) {
//printf("y[%f %f %f] : [%f %f %f]\n", worldRay.orig.x, worldRay.orig.y, worldRay.orig.z, worldRay.dir.x, worldRay.dir.y, worldRay.dir.z);
intersections[i] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
}
}
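// Blocked-ray shading pass: consumes the fluid hits precomputed by IDrayScheduler, adds sphere hits,
// and immediately applies emission, mask and a diffuse bounce for each of the cMsaaRate rays per pixel.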
__global__ void intersectRaysSpheresScene(int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
for (int32_t ii = 0; ii < cMsaaRate; ++ii) {
int32_t idx = i * cMsaaRate + ii;
IDRAY2D = cRaysDepth[idx];
float3 normal, emission, color, x;
if (cFluidIntersections[idx].depth < 1e19f) {
normal = cFluidIntersections[idx].normal;
IDRAY2D.depth = cFluidIntersections[idx].depth;
x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
color = f;
emission = float3{ 0.f,0.f,0.f };
}
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
//Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersectID2D();
if (d && d < IDRAY2D.depth) {
IDRAY2D.depth = d;
IDRAY2D.geomType = sphere_id;
x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
Sphere &sphere = spheres[IDRAY2D.geomType];
normal = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
color = sphere.col;
emission = sphere.emi;
}
}
normal = math::dot(normal, IDRAY2D.direction) < 0 ? normal : normal * -1;
hiprandState_t randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
hiprand_init(seed + threadId, 0, 0, &randState);
//float phi = 2 * CUDART_PI_F * randf();
//float r2 = randf();
float phi = 2 * CUDART_PI_F * hiprand_uniform(&randState);
float r2 = hiprand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
cImage[IDRAY2D.index].color += cImage[IDRAY2D.index].mask * emission;
cImage[IDRAY2D.index].mask *= color;
IDRAY2D.origin = x + w * 0.01f;
IDRAY2D.direction = dw;
//if (IDRAY2D.bounces == 0) { cImage[IDRAY2D.index].color = float3{ d * 0.005f, d*0.005f, d * 0.005f }; }
IDRAY2D.depth = FLT_MAX;
IDRAY2D.geomType = 0;
IDRAY2D.bounces++;
cRaysDepth[i * cMsaaRate + ii] = IDRAY2D;
}
}
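// Alternative shading pass for the blocked-ray path: reconstructs the surface normal at the stored
// hit (voxel face for the fluid, analytic normal for spheres), accumulates emission, applies the
// diffuse bounce and resets depth/geomType for the next iteration.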
__global__ void updateIDRAY(int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
for (int32_t ii = 0; ii < cMsaaRate; ++ii) {
int32_t idx = i * cMsaaRate + ii;
IDRAY2D = cRaysDepth[idx];
float3 normal, emission, color;
auto d = IDRAY2D.depth;
auto x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
if (IDRAY2D.depth < 1e19f) {
if (IDRAY2D.geomType == MAX_VAL_04BIT) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
//auto hitPosition = SMRAY_ORIG + fluidHit * SMRAY_DIR;
//int3 vIdx = position_to_idx3D_i(fluidMemory.position[fluidHit], fluidMemory.min_coord, cs);
auto voxelIdx = position_to_idx3D_i(x, fluidMemory.min_coord, fluidMemory.cell_size.x);
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(voxelIdx) * cs;
auto c = min + cs * 0.5f;
auto prel = x - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
normal = math::castTo<float3>(nc);
//auto nl = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
//normal = nl;
color = f;
emission = float3{ 0.f,0.f,0.f };
}
else
{
Sphere &sphere = spheres[IDRAY2D.geomType];
normal = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
color = sphere.col;
emission = sphere.emi;
}
normal = math::dot(normal, IDRAY2D.direction) < 0 ? normal : normal * -1;
hiprandState_t randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
hiprand_init(seed + threadId, 0, 0, &randState);
//float phi = 2 * CUDART_PI_F * randf();
//float r2 = randf();
float phi = 2 * CUDART_PI_F * hiprand_uniform(&randState);
float r2 = hiprand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
cImage[IDRAY2D.index].color += cImage[IDRAY2D.index].mask * emission;
cImage[IDRAY2D.index].mask *= color;
IDRAY2D.origin = x + w * 0.01f;
IDRAY2D.direction = dw;
}
if (IDRAY2D.bounces == 0) { cImage[IDRAY2D.index].color = float3{ d * 0.005f, d*0.005f, d * 0.005f }; }
IDRAY2D.depth = FLT_MAX;
IDRAY2D.geomType = 0;
IDRAY2D.bounces++;
cRaysDepth[i * cMsaaRate + ii] = IDRAY2D;
}
}
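// Variant of intersectRays that intersects the fluid through the BVH instead of the uniform grid.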
__global__ void intersectRaysBVH(Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
auto worldRay = rays[i];
float scene_t = 1e20f;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(worldRay);
if (d && d < scene_t) {
scene_t = d;
auto x = worldRay.orig + worldRay.dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
intersections[i] = RayIntersection{ d, sphere.refl, sphere.col, nl, sphere.emi };
}
}
auto fluidHit = bvh::rayIntersectFluidBVH(worldRay);
if (fluidHit.status && fluidHit.depth < scene_t) {
scene_t = fluidHit.depth;
auto n = fluidHit.normal;
//return n;
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
intersections[i] = RayIntersection{ fluidHit.depth, DIFF, f, nl, emit };
}
if (scene_t > 1e19f)
intersections[i] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
}
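// Shading kernel shared by the grid and BVH paths: adds emission, samples a cosine-weighted
// direction around the shading normal and offsets the new origin slightly along the normal to
// avoid self-intersection.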
__global__ void shadeRays(int32_t seed, Pixel* image, Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
hiprandState_t randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
hiprand_init(seed + threadId, 0, 0, &randState);
auto worldRay = rays[i];
auto inter = intersections[i];
auto pixel = image[i];
if (inter.depth >= 1e18f)
return;
auto position = worldRay.orig + worldRay.dir * inter.depth;
pixel.color += (pixel.mask * inter.emission);
//pixel.color = inter.surfaceNormal;
//pixel.color = worldRay.dir;
float3 n = math::normalize(inter.surfaceNormal);
float3 nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
float phi = 2 * CUDART_PI_F * hiprand_uniform(&randState);
float r2 = hiprand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
float3 dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
worldRay.orig = position + w * 0.01f;
pixel.mask *= inter.surfaceColor;
//dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
//worldRay.orig = position + nl * bias;
// worldRay.orig = position + w * 1.5f;
worldRay.dir = dw;
if (dw.x != dw.x || dw.y != dw.y || dw.z != dw.z || (dw.x == 0.f && dw.y == 0.f && dw.z == 0.f))
worldRay.dir = float3{ 1,0,0 };
rays[i] = worldRay;
image[i] = pixel;
}
}
}
#ifdef __INTELLISENSE__
#define CPSYMBOL(symbol, var)
#define LAUNCH(kernel, blocks, tpb, sm, stream) kernel
#else
#define CPSYMBOL(symbol, var) hipMemcpyToSymbol(symbol, &var, sizeof(symbol))
#define LAUNCH(kernel, blocks, tpb, sm, stream) kernel<<<dim3(blocks),dim3(tpb),sm,stream>>> // chevron launch composes with the (args) call at the use site
#endif
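// Host entry point: allocates the persistent device buffers on first use, uploads scene/fluid
// constants, generates primary rays and alternates intersection and shading kernels for a fixed
// number of bounces before tone mapping into the mapped OpenGL surface.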
void cudaMLMRender(SceneInformation scene, cudaGraphicsResource_t resource, FluidMemory fmem, FluidSystem fsys, float3 *acc, unsigned framenumber,
unsigned hashedframes, int32_t renderMode) {
using namespace rtx;
static std::random_device rd;
static std::uniform_int_distribution<int32_t> dist(INT_MIN, INT_MAX);
static bool once = true;
static hipStream_t stream;
constexpr auto msaa = 1;
constexpr int32_t blocks_1080 = 32 * 20;
constexpr int32_t blockSize_1080 = 64;
constexpr auto bounces = 5;
int32_t width = static_cast<int32_t>(scene.width);
int32_t height = static_cast<int32_t>(scene.height);
int32_t numRays = width * height * msaa;
//int32_t num_blocks = blocks_1080 * blockSize_1080;
if (once) {
std::cout << "MLM renderer built at " << __TIMESTAMP__ << std::endl;
hipStreamCreate(&stream);
hipMalloc(&cuImage, sizeof(Pixel) * width * height);
hipMalloc(&cuCurrentRays, sizeof(Ray) * width * height);
hipMalloc(&cuRayIntersections, sizeof(RayIntersection) * width * height);
hipMalloc(&cuScheduledRays, sizeof(RayWithIndex) * width * height * msaa);
hipMalloc(&cuBlockedRays, sizeof(RayWithIndexAndDepth) * width * height * msaa);
hipMalloc(&cuFluidIntersections, sizeof(FluidIntersection) * width * height * msaa);
hipMalloc(&rayCounter, sizeof(int32_t));
hipMalloc(&cRNGSeeds, sizeof(uint32_t) * blocks_1080 * blockSize_1080);
hipMalloc(&cuRandStates, sizeof(hiprandState_t) * blocks_1080 * blockSize_1080);
//initRNG <<<blocks_1080, blockSize_1080>>> (cuRandStates, dist(rd));
LAUNCH(common::initRNGSeeds, blocks_1080, blockSize_1080, 0 ,0)(cRNGSeeds, dist(rd));
hipArray_t color_arr;
hipGraphicsMapResources(1, &resource, 0);
hipGraphicsSubResourceGetMappedArray(&color_arr, resource, 0, 0);
hipBindSurfaceToArray(surfaceWriteOut, color_arr);
once = false;
}
//scene.m_camera.apertureRadius = 0.f;
CPSYMBOL(cScene, scene);
CPSYMBOL(fluidSystem, fsys);
CPSYMBOL(fluidMemory, fmem);
CPSYMBOL(cNumRays, numRays);
CPSYMBOL(cRays, cuScheduledRays);
CPSYMBOL(cRaysDepth, cuBlockedRays);
CPSYMBOL(cImage, cuImage);
CPSYMBOL(cRandStates, cuRandStates);
CPSYMBOL(cFluidIntersections, cuFluidIntersections);
CPSYMBOL(cMsaaRate, msaa);
CPSYMBOL(cRayCounter, rayCounter);
CPSYMBOL(cuSeeds, cRNGSeeds);
dim3 texturedim((uint32_t)scene.width, (uint32_t)scene.height, 1);
dim3 blockdim(8, 8, 1);
dim3 griddim(texturedim.x / blockdim.x, texturedim.y / blockdim.y, 1);
if (texturedim.x % blockdim.x != 0)
griddim.x += 1;
if (texturedim.y % blockdim.y != 0)
griddim.y += 1;
//CoreLoopPathTracingKernel<<<griddim, blockdim>>>((float3 *)acc, framenumber, hashedframes);
//if (renderMode == 0) {
// LAUNCH(common::generateScheduledRays, griddim, dim3(msaa, blockdim.x, blockdim.y), 0, stream)(hashedframes, cuImage, cuScheduledRays, cuCurrentRays, msaa);
// hipMemcpy(rayCounter, &num_blocks, sizeof(int32_t), hipMemcpyHostToDevice);
// render::rayScheduler << <blocks_1080, blockSize_1080, sizeof(ScheduledRay) * blockSize_1080, stream >> >();
// common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, (float)msaa);
//}
//else if (renderMode == 4) {
//cuda::sync("Test 1");
// LAUNCH(common::generateBlockedRays, griddim, dim3(msaa, blockdim.x, blockdim.y), 0, stream)(hashedframes, cuImage, cuBlockedRays, cuCurrentRays, msaa);
//// cuda::sync("Test 2");
// for (int32_t i = 0; i < bounces; ++i) {
// //std::cout << "Bounce: " << i << std::endl;
// hipMemcpyAsync(rayCounter, &num_blocks, sizeof(int32_t), hipMemcpyHostToDevice, stream);
// // cuda::sync("Test 3");
// render::IDrayScheduler << <blocks_1080, blockSize_1080, sizeof(RayWithIndexAndDepth) * blockSize_1080, stream >> > ();
// //cuda::sync("Test 4");
// render::intersectRaysSpheresScene << <griddim, blockdim, blockdim.x * blockdim.y * sizeof(RayWithIndexAndDepth), stream >>> (dist(rd));
// //cuda::sync("Test 5");
// //render::updateIDRAY << <griddim, blockdim, blockdim.x * blockdim.y * sizeof(RayWithIndexAndDepth), stream >> > (dist(rd));
// // cuda::sync("Test 6");
// }
// common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, 1.f);
// //cuda::sync("Test 7");
// }
//else{
common::generatePrimaryRays << <griddim, blockdim, 0, stream >> > (hashedframes, cuImage, cuCurrentRays);
for (int32_t i = 0; i < bounces; ++i) {
std::cout << i;
std::cout.flush();
cuda::sync(std::to_string(__LINE__));
//if (renderMode == 1) {
// render::intersectAndShadeRaysSM<<<griddim, blockdim, sizeof(RayIntersection) * blockdim.x * blockdim.y, stream>>>(
// cuCurrentRays, cuImage, hashedframes);
//} else if (renderMode == 2) {
if (renderMode == 3) {
hipLaunchKernelGGL(( render::intersectRaysBVH), dim3(griddim), dim3(blockdim), 0, 0, cuCurrentRays, cuRayIntersections);
hipLaunchKernelGGL(( render::shadeRays), dim3(griddim), dim3(blockdim), 0, 0, dist(rd), cuImage, cuCurrentRays, cuRayIntersections);
}
else {
// std::cout << ".\n";
render::intersectRays << <griddim, blockdim >> > (cuCurrentRays, cuRayIntersections);
cuda::sync(std::to_string(__LINE__));
//std::cout << "-\n";
render::shadeRays << <griddim, blockdim >> > (dist(rd), cuImage, cuCurrentRays, cuRayIntersections);
cuda::sync(std::to_string(__LINE__));
//std::cout << ":\n";
}
//break;
// } else
}
std::cout << std::endl;
common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, 1.f);
cuda::sync(std::to_string(__LINE__));
//}
hipStreamSynchronize(stream);
} | 34dbe0933bc5fb5b2d758442a533227be569dd43.cu | #define NO_QT
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
#include <render/mlmRender/mlmRender.h>
#include <sstream>
#include <texture_types.h>
#include <utility/math.h>
#include <vector_functions.h>
#include <vector_types.h>
//#define DEBUG_AABB
#pragma region MACROS
#define SMRAY rayCache[threadIdx.x]
#define IDRAY rayid[threadIdx.x]
#define IDRAY2D rayid[threadIdx.x + blockDim.x * threadIdx.y]
#ifdef __INTELLISENSE__
#define gridDim int3{32,1,1}
#define blockDim int3{32,1,1}
#define threadIdx int3{0,0,0}
#define blockIdx int3{0,0,0}
#endif
#define SMRAY_DIR_X SMRAY.direction.x
#define SMRAY_DIR_Y SMRAY.direction.y
#define SMRAY_DIR_Z SMRAY.direction.z
#define SMRAY_ORIG_X SMRAY.origin.x
#define SMRAY_ORIG_Y SMRAY.origin.y
#define SMRAY_ORIG_Z SMRAY.origin.z
#define SMRAY_IDX SMRAY.index
#define SMRAY_BOUNCES SMRAY.bounces
#define SMRAY_DEPTH SMRAY.depth
#define SMRAY_MASK_X SMRAY.mask.x
#define SMRAY_MASK_Y SMRAY.mask.y
#define SMRAY_MASK_Z SMRAY.mask.z
#define SMRAY_DIR SMRAY.direction
#define SMRAY_ORIG SMRAY.origin
#define SMRAY_MASK SMRAY.mask
#define IDRAY_DIR_X IDRAY.direction.x
#define IDRAY_DIR_Y IDRAY.direction.y
#define IDRAY_DIR_Z IDRAY.direction.z
#define IDRAY_ORIG_X IDRAY.origin.x
#define IDRAY_ORIG_Y IDRAY.origin.y
#define IDRAY_ORIG_Z IDRAY.origin.z
#define IDRAY_IDX IDRAY.index
#define IDRAY_BOUNCES IDRAY.bounces
#define IDRAY_DEPTH IDRAY.depth
#define IDRAY_DIR IDRAY.direction
#define IDRAY_ORIG IDRAY.origin
#define GET_NEXT_RAY {if(!scheduler::grabRay()) return; else continue;}
#define GET_NEXT_IDRAY {if(!scheduler::grabIDRay()) return; else continue;}
#pragma endregion
namespace rtx {
enum Refl_t { DIFF, METAL, SPEC, REFR, COAT };
struct uint10_3 {
uint32_t x : 10;
uint32_t y : 10;
uint32_t z : 10;
uint32_t valid : 1;
};
struct Ray {
float3 orig, dir;
};
struct ScheduledRay {
float3 origin;
int32_t index;
float3 direction;
float depth;
float3 mask;
int32_t bounces;
};
struct RayWithIndex {
float3 origin;
float3 direction;
int32_t index;
};
struct RayWithIndexAndDepth {
float3 origin;
float depth;
float3 direction;
uint32_t index : 24;
uint32_t bounces : 4;
uint32_t geomType : 4;
};
struct FluidIntersection {
float3 normal;
float depth = 1e21f;
};
struct RayIntersection {
float depth;
Refl_t materialType;
float3 surfaceColor;
float3 surfaceNormal;
float3 emission;
};
struct rayHit {
float3 position;
float depth;
float3 normal;
bool status;
int32_t voxelIdx;
};
struct rayHitLean {
int3 voxel;
float depth;
};
struct rayHitSuperLean {
float depth;
};
struct AABBHit {
bool hit;
float tmin;
float tmax;
};
struct Pixel {
float3 color;
float3 mask;
};
struct bounceRay {
int32_t pixelIdx;
float3 orig;
float3 dir;
};
struct Sphere {
float rad;
float3 pos, emi, col;
Refl_t refl;
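// Standard quadratic ray/sphere test: returns the closest hit distance greater than epsilon, 0 on miss.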
__device__ float intersect(const Ray &r) const {
float3 op = pos - r.orig; //
float t, epsilon = 0.01f;
float b = math::dot(op, r.dir);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float intersectSM() const;
__device__ float intersectID() const;
__device__ float intersectID2D() const;
};
curandState* cuRandStates;
Pixel* cuImage;
Ray* cuCurrentRays;
RayIntersection* cuRayIntersections;
RayWithIndex* cuScheduledRays;
FluidIntersection* cuFluidIntersections;
RayWithIndexAndDepth* cuBlockedRays;
int32_t* rayCounter;
uint32_t* cRNGSeeds;
__device__ __constant__ SceneInformation cScene;
__device__ __constant__ FluidSystem fluidSystem;
__device__ __constant__ FluidMemory fluidMemory;
__device__ __constant__ Sphere spheres[] = {
//{16, {128.0f, 128, 128}, {6, 4, 2}, {0.f, 0.f, 0.f}, DIFF},
{10000, {50.0f, 40.8f, -1060.f}, {0.55f, 0.55f, 0.55f}, {0.175f, 0.175f, 0.175f}, DIFF},
//{100000, {0.0f, 0, -100000.}, {0, 0, 0}, {0.5f, 0.0f, 0.0f}, DIFF},
//{100000, {0.0f, 0, -100000.1}, {0, 0, 0}, {0.3f, 0.3f, 0.3f}, DIFF}
};
__device__ __constant__ int32_t cNumRays;
__device__ __constant__ RayWithIndex* cRays;
__device__ __constant__ RayWithIndexAndDepth* cRaysDepth;
__device__ __constant__ int32_t* cRayCounter;
__device__ __constant__ Pixel* cImage;
__device__ __constant__ curandState* cRandStates;
__device__ __constant__ FluidIntersection* cFluidIntersections;
__device__ __constant__ uint32_t* cuSeeds;
__device__ __constant__ int32_t cMsaaRate;
#pragma region MORTON_LUT
__device__ __constant__ int32_t morton256_x[256] = {
0x00000000,
0x00000001, 0x00000008, 0x00000009, 0x00000040, 0x00000041, 0x00000048, 0x00000049, 0x00000200,
0x00000201, 0x00000208, 0x00000209, 0x00000240, 0x00000241, 0x00000248, 0x00000249, 0x00001000,
0x00001001, 0x00001008, 0x00001009, 0x00001040, 0x00001041, 0x00001048, 0x00001049, 0x00001200,
0x00001201, 0x00001208, 0x00001209, 0x00001240, 0x00001241, 0x00001248, 0x00001249, 0x00008000,
0x00008001, 0x00008008, 0x00008009, 0x00008040, 0x00008041, 0x00008048, 0x00008049, 0x00008200,
0x00008201, 0x00008208, 0x00008209, 0x00008240, 0x00008241, 0x00008248, 0x00008249, 0x00009000,
0x00009001, 0x00009008, 0x00009009, 0x00009040, 0x00009041, 0x00009048, 0x00009049, 0x00009200,
0x00009201, 0x00009208, 0x00009209, 0x00009240, 0x00009241, 0x00009248, 0x00009249, 0x00040000,
0x00040001, 0x00040008, 0x00040009, 0x00040040, 0x00040041, 0x00040048, 0x00040049, 0x00040200,
0x00040201, 0x00040208, 0x00040209, 0x00040240, 0x00040241, 0x00040248, 0x00040249, 0x00041000,
0x00041001, 0x00041008, 0x00041009, 0x00041040, 0x00041041, 0x00041048, 0x00041049, 0x00041200,
0x00041201, 0x00041208, 0x00041209, 0x00041240, 0x00041241, 0x00041248, 0x00041249, 0x00048000,
0x00048001, 0x00048008, 0x00048009, 0x00048040, 0x00048041, 0x00048048, 0x00048049, 0x00048200,
0x00048201, 0x00048208, 0x00048209, 0x00048240, 0x00048241, 0x00048248, 0x00048249, 0x00049000,
0x00049001, 0x00049008, 0x00049009, 0x00049040, 0x00049041, 0x00049048, 0x00049049, 0x00049200,
0x00049201, 0x00049208, 0x00049209, 0x00049240, 0x00049241, 0x00049248, 0x00049249, 0x00200000,
0x00200001, 0x00200008, 0x00200009, 0x00200040, 0x00200041, 0x00200048, 0x00200049, 0x00200200,
0x00200201, 0x00200208, 0x00200209, 0x00200240, 0x00200241, 0x00200248, 0x00200249, 0x00201000,
0x00201001, 0x00201008, 0x00201009, 0x00201040, 0x00201041, 0x00201048, 0x00201049, 0x00201200,
0x00201201, 0x00201208, 0x00201209, 0x00201240, 0x00201241, 0x00201248, 0x00201249, 0x00208000,
0x00208001, 0x00208008, 0x00208009, 0x00208040, 0x00208041, 0x00208048, 0x00208049, 0x00208200,
0x00208201, 0x00208208, 0x00208209, 0x00208240, 0x00208241, 0x00208248, 0x00208249, 0x00209000,
0x00209001, 0x00209008, 0x00209009, 0x00209040, 0x00209041, 0x00209048, 0x00209049, 0x00209200,
0x00209201, 0x00209208, 0x00209209, 0x00209240, 0x00209241, 0x00209248, 0x00209249, 0x00240000,
0x00240001, 0x00240008, 0x00240009, 0x00240040, 0x00240041, 0x00240048, 0x00240049, 0x00240200,
0x00240201, 0x00240208, 0x00240209, 0x00240240, 0x00240241, 0x00240248, 0x00240249, 0x00241000,
0x00241001, 0x00241008, 0x00241009, 0x00241040, 0x00241041, 0x00241048, 0x00241049, 0x00241200,
0x00241201, 0x00241208, 0x00241209, 0x00241240, 0x00241241, 0x00241248, 0x00241249, 0x00248000,
0x00248001, 0x00248008, 0x00248009, 0x00248040, 0x00248041, 0x00248048, 0x00248049, 0x00248200,
0x00248201, 0x00248208, 0x00248209, 0x00248240, 0x00248241, 0x00248248, 0x00248249, 0x00249000,
0x00249001, 0x00249008, 0x00249009, 0x00249040, 0x00249041, 0x00249048, 0x00249049, 0x00249200,
0x00249201, 0x00249208, 0x00249209, 0x00249240, 0x00249241, 0x00249248, 0x00249249
};
__device__ __constant__ int32_t morton256_y[256] = {
0x00000000,
0x00000002, 0x00000010, 0x00000012, 0x00000080, 0x00000082, 0x00000090, 0x00000092, 0x00000400,
0x00000402, 0x00000410, 0x00000412, 0x00000480, 0x00000482, 0x00000490, 0x00000492, 0x00002000,
0x00002002, 0x00002010, 0x00002012, 0x00002080, 0x00002082, 0x00002090, 0x00002092, 0x00002400,
0x00002402, 0x00002410, 0x00002412, 0x00002480, 0x00002482, 0x00002490, 0x00002492, 0x00010000,
0x00010002, 0x00010010, 0x00010012, 0x00010080, 0x00010082, 0x00010090, 0x00010092, 0x00010400,
0x00010402, 0x00010410, 0x00010412, 0x00010480, 0x00010482, 0x00010490, 0x00010492, 0x00012000,
0x00012002, 0x00012010, 0x00012012, 0x00012080, 0x00012082, 0x00012090, 0x00012092, 0x00012400,
0x00012402, 0x00012410, 0x00012412, 0x00012480, 0x00012482, 0x00012490, 0x00012492, 0x00080000,
0x00080002, 0x00080010, 0x00080012, 0x00080080, 0x00080082, 0x00080090, 0x00080092, 0x00080400,
0x00080402, 0x00080410, 0x00080412, 0x00080480, 0x00080482, 0x00080490, 0x00080492, 0x00082000,
0x00082002, 0x00082010, 0x00082012, 0x00082080, 0x00082082, 0x00082090, 0x00082092, 0x00082400,
0x00082402, 0x00082410, 0x00082412, 0x00082480, 0x00082482, 0x00082490, 0x00082492, 0x00090000,
0x00090002, 0x00090010, 0x00090012, 0x00090080, 0x00090082, 0x00090090, 0x00090092, 0x00090400,
0x00090402, 0x00090410, 0x00090412, 0x00090480, 0x00090482, 0x00090490, 0x00090492, 0x00092000,
0x00092002, 0x00092010, 0x00092012, 0x00092080, 0x00092082, 0x00092090, 0x00092092, 0x00092400,
0x00092402, 0x00092410, 0x00092412, 0x00092480, 0x00092482, 0x00092490, 0x00092492, 0x00400000,
0x00400002, 0x00400010, 0x00400012, 0x00400080, 0x00400082, 0x00400090, 0x00400092, 0x00400400,
0x00400402, 0x00400410, 0x00400412, 0x00400480, 0x00400482, 0x00400490, 0x00400492, 0x00402000,
0x00402002, 0x00402010, 0x00402012, 0x00402080, 0x00402082, 0x00402090, 0x00402092, 0x00402400,
0x00402402, 0x00402410, 0x00402412, 0x00402480, 0x00402482, 0x00402490, 0x00402492, 0x00410000,
0x00410002, 0x00410010, 0x00410012, 0x00410080, 0x00410082, 0x00410090, 0x00410092, 0x00410400,
0x00410402, 0x00410410, 0x00410412, 0x00410480, 0x00410482, 0x00410490, 0x00410492, 0x00412000,
0x00412002, 0x00412010, 0x00412012, 0x00412080, 0x00412082, 0x00412090, 0x00412092, 0x00412400,
0x00412402, 0x00412410, 0x00412412, 0x00412480, 0x00412482, 0x00412490, 0x00412492, 0x00480000,
0x00480002, 0x00480010, 0x00480012, 0x00480080, 0x00480082, 0x00480090, 0x00480092, 0x00480400,
0x00480402, 0x00480410, 0x00480412, 0x00480480, 0x00480482, 0x00480490, 0x00480492, 0x00482000,
0x00482002, 0x00482010, 0x00482012, 0x00482080, 0x00482082, 0x00482090, 0x00482092, 0x00482400,
0x00482402, 0x00482410, 0x00482412, 0x00482480, 0x00482482, 0x00482490, 0x00482492, 0x00490000,
0x00490002, 0x00490010, 0x00490012, 0x00490080, 0x00490082, 0x00490090, 0x00490092, 0x00490400,
0x00490402, 0x00490410, 0x00490412, 0x00490480, 0x00490482, 0x00490490, 0x00490492, 0x00492000,
0x00492002, 0x00492010, 0x00492012, 0x00492080, 0x00492082, 0x00492090, 0x00492092, 0x00492400,
0x00492402, 0x00492410, 0x00492412, 0x00492480, 0x00492482, 0x00492490, 0x00492492
};
__device__ __constant__ int32_t morton256_z[256] = {
0x00000000,
0x00000004, 0x00000020, 0x00000024, 0x00000100, 0x00000104, 0x00000120, 0x00000124, 0x00000800,
0x00000804, 0x00000820, 0x00000824, 0x00000900, 0x00000904, 0x00000920, 0x00000924, 0x00004000,
0x00004004, 0x00004020, 0x00004024, 0x00004100, 0x00004104, 0x00004120, 0x00004124, 0x00004800,
0x00004804, 0x00004820, 0x00004824, 0x00004900, 0x00004904, 0x00004920, 0x00004924, 0x00020000,
0x00020004, 0x00020020, 0x00020024, 0x00020100, 0x00020104, 0x00020120, 0x00020124, 0x00020800,
0x00020804, 0x00020820, 0x00020824, 0x00020900, 0x00020904, 0x00020920, 0x00020924, 0x00024000,
0x00024004, 0x00024020, 0x00024024, 0x00024100, 0x00024104, 0x00024120, 0x00024124, 0x00024800,
0x00024804, 0x00024820, 0x00024824, 0x00024900, 0x00024904, 0x00024920, 0x00024924, 0x00100000,
0x00100004, 0x00100020, 0x00100024, 0x00100100, 0x00100104, 0x00100120, 0x00100124, 0x00100800,
0x00100804, 0x00100820, 0x00100824, 0x00100900, 0x00100904, 0x00100920, 0x00100924, 0x00104000,
0x00104004, 0x00104020, 0x00104024, 0x00104100, 0x00104104, 0x00104120, 0x00104124, 0x00104800,
0x00104804, 0x00104820, 0x00104824, 0x00104900, 0x00104904, 0x00104920, 0x00104924, 0x00120000,
0x00120004, 0x00120020, 0x00120024, 0x00120100, 0x00120104, 0x00120120, 0x00120124, 0x00120800,
0x00120804, 0x00120820, 0x00120824, 0x00120900, 0x00120904, 0x00120920, 0x00120924, 0x00124000,
0x00124004, 0x00124020, 0x00124024, 0x00124100, 0x00124104, 0x00124120, 0x00124124, 0x00124800,
0x00124804, 0x00124820, 0x00124824, 0x00124900, 0x00124904, 0x00124920, 0x00124924, 0x00800000,
0x00800004, 0x00800020, 0x00800024, 0x00800100, 0x00800104, 0x00800120, 0x00800124, 0x00800800,
0x00800804, 0x00800820, 0x00800824, 0x00800900, 0x00800904, 0x00800920, 0x00800924, 0x00804000,
0x00804004, 0x00804020, 0x00804024, 0x00804100, 0x00804104, 0x00804120, 0x00804124, 0x00804800,
0x00804804, 0x00804820, 0x00804824, 0x00804900, 0x00804904, 0x00804920, 0x00804924, 0x00820000,
0x00820004, 0x00820020, 0x00820024, 0x00820100, 0x00820104, 0x00820120, 0x00820124, 0x00820800,
0x00820804, 0x00820820, 0x00820824, 0x00820900, 0x00820904, 0x00820920, 0x00820924, 0x00824000,
0x00824004, 0x00824020, 0x00824024, 0x00824100, 0x00824104, 0x00824120, 0x00824124, 0x00824800,
0x00824804, 0x00824820, 0x00824824, 0x00824900, 0x00824904, 0x00824920, 0x00824924, 0x00900000,
0x00900004, 0x00900020, 0x00900024, 0x00900100, 0x00900104, 0x00900120, 0x00900124, 0x00900800,
0x00900804, 0x00900820, 0x00900824, 0x00900900, 0x00900904, 0x00900920, 0x00900924, 0x00904000,
0x00904004, 0x00904020, 0x00904024, 0x00904100, 0x00904104, 0x00904120, 0x00904124, 0x00904800,
0x00904804, 0x00904820, 0x00904824, 0x00904900, 0x00904904, 0x00904920, 0x00904924, 0x00920000,
0x00920004, 0x00920020, 0x00920024, 0x00920100, 0x00920104, 0x00920120, 0x00920124, 0x00920800,
0x00920804, 0x00920820, 0x00920824, 0x00920900, 0x00920904, 0x00920920, 0x00920924, 0x00924000,
0x00924004, 0x00924020, 0x00924024, 0x00924100, 0x00924104, 0x00924120, 0x00924124, 0x00924800,
0x00924804, 0x00924820, 0x00924824, 0x00924900, 0x00924904, 0x00924920, 0x00924924
};
#pragma endregion
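// Dynamically sized shared-memory buffers; all extern __shared__ arrays alias the same allocation,
// so each kernel only uses the variant matching the shared-memory size it was launched with.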
__shared__ extern RayWithIndexAndDepth rayid[];
__shared__ extern RayIntersection rayi[];
__shared__ extern Ray raySM[];
__shared__ extern ScheduledRay rayCache[];
__shared__ extern float SMCache[];
surface<void, cudaSurfaceType2D> surfaceWriteOut;
__device__ __host__ __inline__ float sgn(float x) {
return x > 0.f ? 1.f : (x < 0.f ? -1.f : 0.f);
}
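// Morton (Z-order) code assembled from the per-axis lookup tables above, which pre-interleave the
// bits of an 8-bit coordinate component.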
__device__ auto idx3D_to_mortonLUT(uint10_3 idx) {
return morton256_x[idx.x] | morton256_y[idx.y] | morton256_z[idx.z];
}
__device__ auto idx3D_to_mortonLUT(int3 idx) {
return morton256_x[idx.x] | morton256_y[idx.y] | morton256_z[idx.z];
}
template<typename T, typename U>
__device__ auto position_to_mortonLUT(T p, U& arrays, float factor = 1.f) {
return idx3D_to_mortonLUT(position_to_idx3D_i(p, arrays.min_coord, math::unit_get<1>(arrays.cell_size) * factor));
}
__device__ auto idx3D_to_hash(uint10_3 idx, uint32_t hash_entries) {
return (idx.x * 73856093 + idx.y * 19349663 + idx.z * 83492791) % hash_entries;
}
__device__ float Sphere::intersectSM() const {
float3 op = pos - SMRAY_ORIG; //
float t, epsilon = 0.01f;
float b = math::dot(op, SMRAY_DIR);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float Sphere::intersectID() const {
float3 op = pos - IDRAY_ORIG; //
float t, epsilon = 0.01f;
float b = math::dot(op, IDRAY_DIR);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
__device__ float Sphere::intersectID2D() const {
float3 op = pos - IDRAY2D.origin; //
float t, epsilon = 0.01f;
float b = math::dot(op, IDRAY2D.direction);
float disc = b * b - math::dot(op, op) + rad * rad; // discriminant
if (disc < 0)
return 0;
else
disc = sqrtf(disc);
return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? t : 0);
}
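// Per-thread xorshift RNG: advances the seed stored in cuSeeds and maps the result into [0,1) by
// reinterpreting the mantissa bits.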
__device__ auto randf() {
auto x = cuSeeds[threadIdx.x + blockIdx.x * blockDim.x];
x ^= x >> 13;
x ^= x << 17;
x ^= x >> 5;
cuSeeds[threadIdx.x + blockIdx.x * blockDim.x] = x;
auto r = (x & 0x007FFFFF) | 0x3F800000;
return *reinterpret_cast<float*>(&r) - 1.f;
}
namespace common {
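// Builds the primary camera ray for pixel (x,y): jitters the sample position inside the pixel,
// projects it onto the focal plane and offsets the origin on the aperture disc for depth of field.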
__device__ Ray generateCameraRay(int32_t x, int32_t y, curandState& randState) {
float3 rendercampos = float3{ cScene.m_camera.position.x, cScene.m_camera.position.y, cScene.m_camera.position.z };
int32_t pixelx = x;
int32_t pixely = cScene.height - y - 1;
//float3 finalcol = float3{ 0.0f, 0.0f, 0.0f };
float3 rendercamview =
math::normalize(float3{ cScene.m_camera.view.x, cScene.m_camera.view.y, cScene.m_camera.view.z });
float3 rendercamup = math::normalize(float3{ cScene.m_camera.up.x, cScene.m_camera.up.y, cScene.m_camera.up.z });
float3 horizontalAxis = math::normalize(math::cross(rendercamview, rendercamup));
float3 verticalAxis = math::normalize(math::cross(horizontalAxis, rendercamview));
float3 middle = rendercampos + rendercamview;
float3 horizontal = horizontalAxis * tanf(cScene.m_camera.fov.x * 0.5f * (CUDART_PI_F / 180));
float3 vertical = -verticalAxis * tanf(-cScene.m_camera.fov.y * 0.5f * (CUDART_PI_F / 180));
float jitterValueX = curand_uniform(&randState) - 0.5f;
float jitterValueY = curand_uniform(&randState) - 0.5f;
float sx = (jitterValueX + pixelx) / (cScene.width - 1);
float sy = (jitterValueY + pixely) / (cScene.height - 1);
// compute pixel on screen
float3 pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1));
float3 pointOnImagePlane =
rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cScene.m_camera.focalDistance);
float3 aperturePoint;
if (cScene.m_camera.apertureRadius > 0.00001f) {
float random1 = curand_uniform(&randState);
float random2 = curand_uniform(&randState);
float angle = 2.f * CUDART_PI_F * random1;
float distance = cScene.m_camera.apertureRadius * sqrtf(random2);
float apertureX = cos(angle) * distance;
float apertureY = sin(angle) * distance;
aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY);
}
else
{
aperturePoint = rendercampos;
}
float3 apertureToImagePlane = pointOnImagePlane - aperturePoint;
apertureToImagePlane = math::normalize(apertureToImagePlane);
float3 rayInWorldSpace = math::normalize(apertureToImagePlane);
float3 originInWorldSpace = aperturePoint;
return Ray{ originInWorldSpace, rayInWorldSpace };
}
__global__ void initRNGSeeds(uint32_t* rngStates, int32_t seed) {
int32_t gIdx = threadIdx.x + blockIdx.x * blockDim.x;
rngStates[gIdx] = gIdx ^ seed;
}
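// One thread per pixel: seeds cuRAND, clears the pixel accumulator/mask and stores the primary ray.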
__global__ void generatePrimaryRays(int32_t seed, Pixel* image, Ray* rays) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
curandState randState;
curand_init(seed + threadId, 0, 0, &randState);
auto worldRay = generateCameraRay(x, y, randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
rays[i] = Ray{ worldRay.orig, worldRay.dir };
}
__global__ void generateScheduledRays(int32_t seed, Pixel* image, RayWithIndex* rays, Ray* oldRays, int32_t msaa_factor) {
int32_t x = blockIdx.x * blockDim.y + threadIdx.y;
int32_t y = blockIdx.y * blockDim.z + threadIdx.z;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t blockId = blockIdx.x + blockIdx.y * gridDim.x;
int32_t threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
curandState randState;
curand_init(seed + threadId, 0, 0, &randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
auto worldRay = generateCameraRay(x, y, randState);
rays[i * msaa_factor + threadIdx.x] = RayWithIndex{ worldRay.orig, worldRay.dir, i };
}
__global__ void generateBlockedRays(int32_t seed, Pixel* image, RayWithIndexAndDepth* rays, Ray* oldRays, int32_t msaa_factor) {
int32_t x = blockIdx.x * blockDim.y + threadIdx.y;
int32_t y = blockIdx.y * blockDim.z + threadIdx.z;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t blockId = blockIdx.x + blockIdx.y * gridDim.x;
int32_t threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
curandState randState;
curand_init(seed + threadId, 0, 0, &randState);
image[i] = Pixel{ float3{0.f,0.f,0.f}, float3{1.f,1.f,1.f} };
auto worldRay = generateCameraRay(x, y, randState);
rays[i * msaa_factor + threadIdx.x] = RayWithIndexAndDepth{ worldRay.orig, FLT_MAX, worldRay.dir, (uint32_t) i, 0u, 0u};
}
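// Accumulates the current frame into the float buffer, averages over the frame count, clamps,
// applies a gamma of 2.2 and writes the result to the output surface.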
__global__ void toneMap(int32_t frameNumber, float3* accumBuffer, Pixel* image, float rate) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
accumBuffer[i] += image[i].color / rate;
float3 tempcol = (accumBuffer[i] / frameNumber);
float3 colour = float3{ math::clamp(tempcol.x, 0.0f, 1.0f), math::clamp(tempcol.y, 0.0f, 1.0f),
math::clamp(tempcol.z, 0.0f, 1.0f) };
float4 out{ (powf(colour.x, 1 / 2.2f)), (powf(colour.y, 1 / 2.2f)), (powf(colour.z, 1 / 2.2f)), 1.f };
//out = float4{ colour.x, colour.y, colour.z, 1.f };
surf2Dwrite(out, surfaceWriteOut, x * sizeof(float4), y, cudaBoundaryModeClamp);
}
}
namespace aabb {
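// Slab test against an axis-aligned box: returns the entry/exit distances and whether the box lies
// in front of (or encloses) the ray origin.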
__device__ __host__ auto rayIntersectAABB(Ray worldRay, float3 aabb_min, float3 aabb_max) {
//float tmin, tmax, tymin, tymax, tzmin, tzmax;
//float invdirx = 1.f / worldRay.dir.x;
//float invdiry = 1.f / worldRay.dir.y;
//float invdirz = 1.f / worldRay.dir.z;
float tmin = ((worldRay.dir.x < 0.f ? aabb_max.x : aabb_min.x) - worldRay.orig.x) / worldRay.dir.x;
float tmax = ((worldRay.dir.x < 0.f ? aabb_min.x : aabb_max.x) - worldRay.orig.x) / worldRay.dir.x;
float tymin = ((worldRay.dir.y < 0.f ? aabb_max.y : aabb_min.y) - worldRay.orig.y) / worldRay.dir.y;
float tymax = ((worldRay.dir.y < 0.f ? aabb_min.y : aabb_max.y) - worldRay.orig.y) / worldRay.dir.y;
#ifndef __CUDA_ARCH__
std::cout << worldRay.orig << worldRay.dir << aabb_min << aabb_max << std::endl;
std::cout << tmin << " " << tmax << " " << tymin << " " << tymax << std::endl;
#endif
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((worldRay.dir.z < 0.f ? aabb_max.z : aabb_min.z) - worldRay.orig.z) / worldRay.dir.z;
float tzmax = ((worldRay.dir.z < 0.f ? aabb_min.z : aabb_max.z) - worldRay.orig.z) / worldRay.dir.z;
#ifndef __CUDA_ARCH__
std::cout << tzmin << " " << tzmax << std::endl;
#endif
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABB(Ray worldRay) {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float invdirx = 1.f / worldRay.dir.x;
float invdiry = 1.f / worldRay.dir.y;
float invdirz = 1.f / worldRay.dir.z;
tmin = (fluidSystem.bounds[invdirx < 0.f].x - worldRay.orig.x) * invdirx;
tmax = (fluidSystem.bounds[1 - (invdirx < 0.f)].x - worldRay.orig.x) * invdirx;
tymin = (fluidSystem.bounds[invdiry < 0.f].y - worldRay.orig.y) * invdiry;
tymax = (fluidSystem.bounds[1 - (invdiry < 0.f)].y - worldRay.orig.y) * invdiry;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (fluidSystem.bounds[invdirz < 0.f].z - worldRay.orig.z) * invdirz;
tzmax = (fluidSystem.bounds[1 - (invdirz < 0.f)].z - worldRay.orig.z) * invdirz;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABBSMRAY() {
const auto aabb_min = fluidSystem.bounds[0];
const auto aabb_max = fluidSystem.bounds[1];
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ AABBHit rayIntersectFluidAABBIDRAY() {
const auto aabb_min = fluidSystem.bounds[0];
const auto aabb_max = fluidSystem.bounds[1];
float tmin = ((IDRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tmax = ((IDRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tymin = ((IDRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
float tymax = ((IDRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((IDRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
float tzmax = ((IDRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBSM(float3 aabb_min, float3 aabb_max) {
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBID(float3 aabb_min, float3 aabb_max) {
float tmin = ((IDRAY_DIR_X < 0.f ? aabb_max.x : aabb_min.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tmax = ((IDRAY_DIR_X < 0.f ? aabb_min.x : aabb_max.x) - IDRAY_ORIG_X) / IDRAY_DIR_X;
float tymin = ((IDRAY_DIR_Y < 0.f ? aabb_max.y : aabb_min.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
float tymax = ((IDRAY_DIR_Y < 0.f ? aabb_min.y : aabb_max.y) - IDRAY_ORIG_Y) / IDRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((IDRAY_DIR_Z < 0.f ? aabb_max.z : aabb_min.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
float tzmax = ((IDRAY_DIR_Z < 0.f ? aabb_min.z : aabb_max.z) - IDRAY_ORIG_Z) / IDRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
__device__ auto rayIntersectAABBSMU(float3 aabb_min, float len) {
float tmin = ((SMRAY_DIR_X < 0.f ? aabb_min.x + len : aabb_min.x) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tmax = ((SMRAY_DIR_X < 0.f ? aabb_min.x : aabb_min.x + len) - SMRAY_ORIG_X) / SMRAY_DIR_X;
float tymin = ((SMRAY_DIR_Y < 0.f ? aabb_min.y + len : aabb_min.y) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
float tymax = ((SMRAY_DIR_Y < 0.f ? aabb_min.y : aabb_min.y + len) - SMRAY_ORIG_Y) / SMRAY_DIR_Y;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = ((SMRAY_DIR_Z < 0.f ? aabb_min.z + len : aabb_min.z) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
float tzmax = ((SMRAY_DIR_Z < 0.f ? aabb_min.z : aabb_min.z + len) - SMRAY_ORIG_Z) / SMRAY_DIR_Z;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
}
namespace traversal {
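// Hash-map lookup for a voxel of the compacted grid: -1 for out-of-range indices, 1 if the cell is
// (potentially) occupied, INT_MAX if it is known to be empty.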
__device__ auto lookup_cell(const int3 &idx) {
if (idx.x >= fluidMemory.grid_size.x || idx.y >= fluidMemory.grid_size.y || idx.z >= fluidMemory.grid_size.z)
return -1;
if (idx.x < 0 || idx.y < 0 || idx.z < 0)
return -1;
auto morton = idx3D_to_hash(idx, fluidMemory.hash_entries);
auto s = fluidMemory.hashMap[idx3D_to_morton(idx) % fluidMemory.hash_entries];
if (s.hash.kind == LIST_ALWAYS_FALSE)
return INT_MAX;
if (s.hash.kind == LIST_ALWAYS_TRUE)
return 1;
if (s.hash.kind == LIST_COMPACT)
if (morton == s.cell.hash)
return 1;
if(s.hash.kind == LIST_ITERATE)
for (int32_t ii = s.hash.beginning; ii < s.hash.beginning + s.hash.length; ++ii)
if (fluidMemory.cellSpan[ii].cell.hash == morton)
return 1;
return INT_MAX;
}
//__device__ auto lookup_cell(const uint10_3& idx) {
// if (idx.x >= fluidMemory.grid_size.x || idx.y >= fluidMemory.grid_size.y || idx.z >= fluidMemory.grid_size.z)
// return INT_MAX;
// //auto morton = idx3D_to_mortonLUT(idx);
// auto s = fluidMemory.hashMap[idx3D_to_hash(idx, fluidMemory.hash_entries)];
// if (s.compacted == 1 && s.beginning != UINT31_MAX) {
// auto cs = cell_span{ (int32_t)s.beginning, s.length };
// auto jj = cs.beginning;
// if (position_to_mortonLUT(fluidMemory.position[jj], fluidMemory, 1.f) == idx3D_to_mortonLUT(idx)) {
// return cs.beginning;
// }
// }
// else {
// auto ii = (int32_t)s.beginning;
// if (s.beginning == UINT31_MAX)
// return INT_MAX;
// int32_t l = idx3D_to_mortonLUT(idx);
// for (; ii < s.beginning + s.length;) {
// auto cs = fluidMemory.cellSpan[ii];
// ++ii;
// auto jj = cs.beginning;
// if (position_to_mortonLUT(fluidMemory.position[jj], fluidMemory, 1.f) == l)
// return ii;
// }
// }
// return INT_MAX;
//}
//__device__ auto lookupVoxelCenter(const float3& voxelCenter) {
// if (voxelCenter.x < fluidSystem.bounds[0].x || voxelCenter.y < fluidSystem.bounds[0].y || voxelCenter.z < fluidSystem.bounds[0].z)
// return INT_MAX;
// if (voxelCenter.x > fluidSystem.bounds[1].x || voxelCenter.y > fluidSystem.bounds[1].y || voxelCenter.z > fluidSystem.bounds[1].z)
// return INT_MAX;
// auto s = fluidMemory.hashMap[position_to_hash(voxelCenter, fluidMemory)];
// if (s.compacted == 1 && s.beginning != UINT31_MAX) {
// auto d = math::max_elem(math::abs((math::castTo<float3>(fluidMemory.position[s.beginning]) - voxelCenter) / fluidMemory.cell_size.x * 0.5f));
// if (d < 1.f)
// return (int32_t)s.beginning;
// }
// else {
// if (s.beginning == UINT31_MAX)
// return INT_MAX;
// for (int32_t ii = s.beginning; ii < s.beginning + s.length; ++ii) {
// auto jj = fluidMemory.cellSpan[ii].beginning;
// auto d = math::max_elem(math::abs((math::castTo<float3>(fluidMemory.position[jj]) - voxelCenter) / fluidMemory.cell_size.x * 0.5f));
// if (d < 1.f)
// return jj;
// }
// }
// return INT_MAX;
//}
__device__ __host__ float mod(float a, float N) {
return a - N * floorf(a / N);
}
__device__ __host__ float intBound2_s(float s, float ds) {
if (s == floorf(s) && ds < 0.f)
return 0.f;
if (ds < 0.f)
return intBound2_s(-s, -ds);
float s2 = mod(s, 1.f);
return (1.f - s2) / ds;
}
__device__ __host__ float3 intBound(const float3& s, const float3& ds) {
return float3{
intBound2_s(s.x, ds.x),
intBound2_s(s.y, ds.y),
intBound2_s(s.z, ds.z)
};
}
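// Voxel traversal in the style of Amanatides/Woo 3D-DDA: visits every cell pierced by the ray
// between aabb_min and aabb_max and calls fn for occupied cells until fn returns true or the
// parametric interval is exhausted.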
template<typename Func> __device__ void iterateVoxels(Func&& fn, const float3& start, const float3& dir, float aabb_min, const float& aabb_max) {
aabb_min = math::max( 0.f, aabb_min);
auto position = start + dir * aabb_min;
auto offset = (position - fluidMemory.min_coord) / fluidMemory.cell_size.x;
int3 voxelPosition = math::castTo<int3>(math::floorf(offset));
float3 tDelta = float3{ sgn(dir.x) / dir.x,sgn(dir.y) / dir.y,sgn(dir.z)/dir.z };
float3 tMax = intBound(offset, dir);
float limit = (aabb_max - aabb_min) / fluidMemory.cell_size.x + 1.f;
while (true) {
auto cell_idx = lookup_cell(voxelPosition);
if (cell_idx == 1) {
if (fn(voxelPosition))
return;
}
if (tMax.x < tMax.y) {
if (tMax.x < tMax.z) {
if (tMax.x > limit) return;
voxelPosition.x += sgn(dir.x);
tMax.x += tDelta.x;
}
else {
if (tMax.z > limit) return;
voxelPosition.z += sgn(dir.z);
tMax.z += tDelta.z;
}
}
else {
if (tMax.y < tMax.z) {
if (tMax.y > limit) return;
voxelPosition.y += sgn(dir.y);
tMax.y += tDelta.y;
}
else {
if (tMax.z > limit) return;
voxelPosition.z += sgn(dir.z);
tMax.z += tDelta.z;
}
}
}
}
//template<typename Func> __device__ void iterateVoxelsCompact(Func&& fn, const float3& start, const float3& dir, const float& aabb_min, const float& aabb_max) {
// //auto intBound = [](auto s, auto ds) {
// // auto sIsInteger = (roundf(s) == s);
// // if (ds < 0 && sIsInteger)
// // return 0.f;
// // return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
// //};
// auto vP = position_to_idx3D(start, fluidMemory.min_coord, math::unit_get<1>(fluidMemory.cell_size));
// uint10_3 voxelPosition{ vP.x, vP.y, vP.z, 1 };
// auto offset = (start - fluidMemory.min_coord) / fluidMemory.cell_size;
// //float3 tMax{ intBound(offset.x, dir.x), intBound(offset.y, dir.y), intBound(offset.z, dir.z) };
// float3 tMax = intBound(offset, dir);
// //float3 tDelta = math::castTo<float3>(uchar3{ sgn(dir.x), sgn(dir.y), sgn(dir.z) }) / dir;
// while (true) {
// auto cell_idx = lookup_cell(voxelPosition);
// if (cell_idx != UINT31_MAX) {
// if (fn(voxelPosition))
// return;
// }
// if (tMax.x < tMax.y) {
// if (tMax.x < tMax.z) {
// if (tMax.x > aabb_max - aabb_min) return;
// voxelPosition.x += sgn(dir.x);
// tMax.x += sgn(dir.x) / dir.x;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPosition.z += sgn(dir.z);
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// else {
// if (tMax.y < tMax.z) {
// if (tMax.y > aabb_max - aabb_min) return;
// voxelPosition.y += sgn(dir.y);
// tMax.y += sgn(dir.y) / dir.y;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPosition.z += sgn(dir.z);
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// }
//}
//template<typename Func> __device__ void iterateVoxelCenters(Func&& fn, const float3& start, const float3& dir, const float& aabb_min, const float& aabb_max) {
// //auto intBound = [](auto s, auto ds) {
// // auto sIsInteger = (roundf(s) == s);
// // if (ds < 0 && sIsInteger)
// // return 0.f;
// // return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
// //};
// int3 voxelPosition = position_to_idx3D_i(start, fluidMemory.min_coord, fluidMemory.cell_size.x);
// float3 voxelPos = fluidMemory.min_coord + math::castTo<float3>(voxelPosition) * fluidMemory.cell_size.x + 0.5f * fluidMemory.cell_size.x;
// auto offset = (start - fluidMemory.min_coord) / fluidMemory.cell_size;
// //float3 tMax{ intBound(offset.x, dir.x), intBound(offset.y, dir.y), intBound(offset.z, dir.z) };
// float3 tMax = intBound(offset, dir);
// //float3 tDelta = math::castTo<float3>(uchar3{ sgn(dir.x), sgn(dir.y), sgn(dir.z) }) / dir;
// while (true) {
// auto cell_idx = lookupVoxelCenter(voxelPos);
// if (cell_idx != UINT31_MAX) {
// if (fn(cell_idx))
// return;
// }
// if (tMax.x < tMax.y) {
// if (tMax.x < tMax.z) {
// if (tMax.x > aabb_max - aabb_min) return;
// voxelPos.x += sgn(dir.x) * fluidMemory.cell_size.x;
// tMax.x += sgn(dir.x) / dir.x;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPos.z += sgn(dir.z) * fluidMemory.cell_size.x;
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// else {
// if (tMax.y < tMax.z) {
// if (tMax.y > aabb_max - aabb_min) return;
// voxelPos.y += sgn(dir.y) * fluidMemory.cell_size.x;
// tMax.y += sgn(dir.y) / dir.y;
// }
// else {
// if (tMax.z > aabb_max - aabb_min) return;
// voxelPos.z += sgn(dir.z) * fluidMemory.cell_size.x;
// tMax.z += sgn(dir.z) / dir.z;
// }
// }
// }
//}
template<typename Func> __device__ void iterateVoxelsSMRAY(Func&& fn, const float& aabb_min, const float& aabb_max) {
auto intBound = [](auto s, auto ds) {
auto sIsInteger = (roundf(s) == s);
if (ds < 0 && sIsInteger)
return 0.f;
return (ds > 0 ? math::ceilf(s) - s : s - math::floorf(s)) / math::abs(ds);
};
int3 voxelPosition = position_to_idx3D_i(SMRAY.origin, fluidMemory.min_coord, math::unit_get<1>(fluidMemory.cell_size));
char4 step{ static_cast<char>(sgn(SMRAY.direction.x)), static_cast<char>(sgn(SMRAY.direction.y)), static_cast<char>(sgn(SMRAY.direction.z)), 1 };
auto offset = (SMRAY.origin - fluidMemory.min_coord) / fluidMemory.cell_size;
float3 tMax{ intBound(offset.x, SMRAY.direction.x), intBound(offset.y, SMRAY.direction.y), intBound(offset.z, SMRAY.direction.z) };
float3 tDelta = math::castTo<float3>(step) / SMRAY.direction;
while (true) {
auto cell_idx = lookup_cell(voxelPosition);
if (cell_idx != UINT31_MAX) {
if (fn(voxelPosition))
return;
}
if (tMax.x < tMax.y) {
if (tMax.x < tMax.z) {
if (tMax.x > aabb_max - aabb_min) return;
voxelPosition.x += step.x;
tMax.x += tDelta.x;
}
else {
if (tMax.z > aabb_max - aabb_min) return;
voxelPosition.z += step.z;
tMax.z += tDelta.z;
}
}
else {
if (tMax.y < tMax.z) {
if (tMax.y > aabb_max - aabb_min) return;
voxelPosition.y += step.y;
tMax.y += tDelta.y;
step.w = 2;
}
else {
if (tMax.z > aabb_max - aabb_min) return;
voxelPosition.z += step.z;
tMax.z += tDelta.z;
step.w = 3;
}
}
}
}
}
namespace bvh {
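// Ray/AABB slab test: intersect the ray against the three pairs of axis-aligned planes of the
// node's bounding box, keeping the largest entry distance (tmin) and the smallest exit distance
// (tmax). The box is hit iff the interval stays non-empty and at least part of it lies in front of
// the ray origin (tmax > 0).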
__device__ auto rayIntersectBVHNode(const CFBVHNode& node, Ray worldRay) {
float tmin, tmax, tymin, tymax, tzmin, tzmax;
float invdirx = 1.f / worldRay.dir.x;
float invdiry = 1.f / worldRay.dir.y;
float invdirz = 1.f / worldRay.dir.z;
float3 max = {
invdirx < 0.f ? node.min.x : node.max.x,
invdiry < 0.f ? node.min.y : node.max.y,
invdirz < 0.f ? node.min.z : node.max.z
};
float3 min = {
invdirx < 0.f ? node.max.x : node.min.x,
invdiry < 0.f ? node.max.y : node.min.y,
invdirz < 0.f ? node.max.z : node.min.z
};
tmin = (min.x - worldRay.orig.x) * invdirx;
tmax = (max.x - worldRay.orig.x) * invdirx;
tymin = (min.y - worldRay.orig.y) * invdiry;
tymax = (max.y - worldRay.orig.y) * invdiry;
if ((tmin > tymax) || (tymin > tmax))
return AABBHit{ false };
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
tzmin = (min.z - worldRay.orig.z) * invdirz;
tzmax = (max.z - worldRay.orig.z) * invdirz;
if ((tmin > tzmax) || (tzmin > tmax))
return AABBHit{ false };
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return AABBHit{ (tmin < 0.f && tmax > 0.f) || (tmin > 0.f && tmax > 0.f), tmin, tmax };
}
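// Iterative, stack-based traversal of the fluid BVH: inner nodes whose bounds are hit closer than
// the best leaf found so far push their two children; leaf hits only update the current best
// (hitVoxelIdx, hitTmin, hitTmax). Leaves are flagged by the high bit of u.leaf.count. If the
// fixed-size stack overflows, the traversal bails out and reports a miss.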
__device__ rayHit rayIntersectFluidBVH(Ray worldRay, int32_t oldIdx = -1) {
//auto aabb = rayIntersectFluidAABB(worldRay);
//if (aabb.hit == false)
// return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
auto nodeNormal = [](auto node, auto point) {
constexpr auto epsilon = 1e-4f;
auto c = (node.min + node.max) * 0.5f;
auto prel = point - c;
auto d = math::abs((node.min - node.max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
return float3{ (float)n.x, (float)n.y, (float)n.z };
};
auto getHitInformation = [=](auto node, auto tmin, auto tmax) {
float3 aabb_min = worldRay.orig + tmin * worldRay.dir;
float3 aabb_max = worldRay.orig + tmax * worldRay.dir;
// DEBUG render for AABB
if (tmin >= 0.f)
return rayHit{ aabb_min, tmin, math::abs(nodeNormal(node, aabb_min)), true };
else
return rayHit{ aabb_max, tmax, math::abs(nodeNormal(node, aabb_max)), true };
};
constexpr auto BVH_STACK_SIZE = 32;
int32_t hitVoxelIdx = -1;
float hitTmin = FLT_MAX;
float hitTmax = -FLT_MAX;
int32_t bvhStack[BVH_STACK_SIZE];
int32_t bvhStackIdx = 0;
bvhStack[bvhStackIdx++] = 0;
while (bvhStackIdx) {
int32_t boxIdx = bvhStack[bvhStackIdx - 1];
bvhStackIdx--;
auto node = fluidSystem.fluidBVH[boxIdx];
if (!(node.u.leaf.count & 0x80000000)) { // INNER NODE
auto intersection = rayIntersectBVHNode(node, worldRay);
if (intersection.hit && intersection.tmin < hitTmin) {
bvhStack[bvhStackIdx++] = node.u.inner.idxLeft;
bvhStack[bvhStackIdx++] = node.u.inner.idxRight;
if (bvhStackIdx > BVH_STACK_SIZE) {
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
}
//return getHitInformation(node, intersection.tmin, intersection.tmax);
}
}
else {
auto intersection = rayIntersectBVHNode(node, worldRay);
if (intersection.hit && intersection.tmin < hitTmin) {
hitVoxelIdx = boxIdx;
hitTmin = intersection.tmin;
hitTmax = intersection.tmax;
}
}
}
if (hitVoxelIdx != -1) {
auto hitNode = fluidSystem.fluidBVH[hitVoxelIdx];
return getHitInformation(hitNode, hitTmin, hitTmax);
}
else
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false, -1 };
}
}
namespace fluid {
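// rayIntersectFluid clips the ray against the global fluid AABB first, then DDA-marches through
// the uniform grid (traversal::iterateVoxels). The first occupied voxel terminates the march; the
// reported normal is the face of that voxel's AABB the ray entered, derived from the hit point's
// offset from the cell centre.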
__device__ rayHit rayIntersectFluid(Ray& worldRay, int32_t oldIdx = -1) {
//#define DEBUG_AABB
auto aabb_center = (fluidSystem.bounds[1] - fluidSystem.bounds[0]) / 2.f;
auto aabb_normal = [](auto v) {
constexpr auto epsilon = 1e-5f;
auto c = (fluidSystem.bounds[0] + fluidSystem.bounds[1]) * 0.5f;
auto prel = v - c;
auto d = math::abs((fluidSystem.bounds[0] - fluidSystem.bounds[1]) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
return float3{ (float)n.x, (float)n.y, (float)n.z };
};
worldRay.dir = math::normalize(worldRay.dir);
auto aabb = aabb::rayIntersectFluidAABB(worldRay);
if (aabb.hit == true) {
#ifdef DEBUG_AABB
float3 aabb_min = worldRay.orig + aabb.tmin * worldRay.dir;
float3 aabb_max = worldRay.orig + aabb.tmax * worldRay.dir;
// DEBUG render for AABB
if (aabb.tmin >= 0.f)
return rayHit{ aabb_min, aabb.tmin, math::abs(aabb_normal(aabb_min)), true };
else
return rayHit{ aabb_max, aabb.tmax, math::abs(aabb_normal(aabb_max)), true };
#endif
float t = FLT_MAX;
char3 nc{ 1,0,0 };
//float3 normal;
traversal::iterateVoxels([&](int3 voxel) {
constexpr auto epsilon = 1e-1f;
float3 min = fluidMemory.min_coord + math::castTo<float3>(voxel) * fluidMemory.cell_size;
float3 max = min + fluidMemory.cell_size;
auto rH = aabb::rayIntersectAABB(worldRay, min, max);
auto hitPosition = worldRay.orig + rH.tmin * worldRay.dir;
auto c = (min + max) * 0.5f;
auto prel = hitPosition - c;
auto d = math::abs((min - max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
nc = char3{ static_cast<char>(n.x), static_cast<char>(n.y), static_cast<char>(n.z) };
//normal = math::abs(prel / d);
//normal = math::abs(float3{
// math::abs(prel.x) > math::abs(prel.y) && math::abs(prel.x) > math::abs(prel.z) ? prel.x : 0.f,
// math::abs(prel.y) > math::abs(prel.x) && math::abs(prel.y) > math::abs(prel.z) ? prel.y : 0.f,
// math::abs(prel.z) > math::abs(prel.y) && math::abs(prel.z) > math::abs(prel.x) ? prel.z : 0.f
//});
/* normal = math::castTo<float3>(voxel);
normal.x *= 0.001f;
normal.y *= 0.02f;
normal.z *= 0.02f;*/
t = rH.tmin;
return true;
}, worldRay.orig, worldRay.dir, aabb.tmin, aabb.tmax);
float3 hitPosition = worldRay.orig + t * worldRay.dir;
float3 normal = float3{ (float)nc.x, (float)nc.y, (float)nc.z };
if (nc.x == nc.y && nc.y == nc.z && nc.z == 0)
normal = float3{ 1.f,0.f,0.f };
if (t < 1e19f)
return rayHit{ hitPosition, math::distance3(worldRay.orig, hitPosition), normal, true };
}
return rayHit{ float3{FLT_MAX, FLT_MAX, FLT_MAX}, FLT_MAX, float3{1.f,0.f,0.f}, false };
}
__device__ rayHitLean rayIntersectFluidLean(Ray& worldRay, int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABB(worldRay);
if (aabb.hit == true) {
float t = FLT_MAX;
int3 hitVoxel{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
hitVoxel = voxel;
t = 0.f;
return true;
}, worldRay.orig, worldRay.dir, math::max(aabb.tmin, 0.f), aabb.tmax);
if (t < 1e19f)
return rayHitLean{ hitVoxel, 0.f };
}
return rayHitLean{ {INT_MAX, INT_MAX, INT_MAX}, FLT_MAX };
}
__device__ rayHitLean rayIntersectFluidLeanSM(int32_t oldIdx = -1) {
int32_t ii = threadIdx.x + blockDim.x * threadIdx.y;
auto aabb = aabb::rayIntersectFluidAABB(raySM[ii]);
if (aabb.hit == true) {
float t = FLT_MAX;
int3 hitVoxel{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
hitVoxel = voxel;
t = 0.f;
return true;
}, raySM[ii].orig, raySM[ii].dir, math::max(aabb.tmin, 0.f), aabb.tmax);
if (t < 1e19f)
return rayHitLean{ hitVoxel, 0.f };
}
return rayHitLean{ {INT_MAX, INT_MAX, INT_MAX}, FLT_MAX };
}
__device__ int3 rayIntersectFluidLeanSMRAY(int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABBSMRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
if (aabb.hit == true) {
//float t = FLT_MAX;
int3 idx{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
idx = voxel;
//t = 0.f;
return true;
}, SMRAY_ORIG, SMRAY_DIR, math::max(aabb.tmin, 0.f), aabb.tmax);
if (idx.x != INT_MAX)
return idx;
}
return int3{ INT_MAX, INT_MAX, INT_MAX };
}
__device__ int3 rayIntersectFluidLeanIDRAY(int32_t oldIdx = -1) {
auto aabb = aabb::rayIntersectFluidAABBIDRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
if (aabb.hit == true) {
//float t = FLT_MAX;
int3 idx{ INT_MAX, INT_MAX, INT_MAX };
traversal::iterateVoxels([&](int3 voxel) {
idx = voxel;
//t = 0.f;
return true;
}, IDRAY_ORIG, IDRAY_DIR, math::max(aabb.tmin, 0.f), aabb.tmax);
if (idx.x != INT_MAX)
return idx;
}
return int3{ INT_MAX, INT_MAX, INT_MAX };
}
//__device__ uint10_3 rayIntersectFluidLeanSMRAYSuper(int32_t oldIdx = -1) {
// auto aabb = aabb::rayIntersectFluidAABBSMRAY();// (Ray{ SMRAY_ORIG, SMRAY_DIR });
// if (aabb.hit == true) {
// uint10_3 t{ 0, 0, 0, 0 };
// traversal::iterateVoxelsCompact([&](uint10_3 particleIdx) {
// t = particleIdx;
// return true;
// }, SMRAY_ORIG, SMRAY_DIR, 0.f, aabb.tmax);
// if (t.valid == true)
// return t;
// }
// return uint10_3{ 0, 0, 0, 0 };
//}
}
namespace scheduler {
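// Persistent-threads work distribution: cRayCounter is a global atomic cursor into the ray queue.
// update_ray / updateIDRay copy a queue entry into the per-thread shared-memory slot, and
// grabRay / grabIDRay hand out the next unprocessed ray until the queue is exhausted.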
__device__ void update_ray(int32_t counter) {
SMRAY = ScheduledRay{ cRays[counter].origin, cRays[counter].index, cRays[counter].direction, 1e21f, float3{1.f,1.f,1.f}, 0 };
}
__device__ void updateIDRay(int32_t counter) {
IDRAY = cRaysDepth[counter];// RayWithIndexAndDepth{ cRaysDepth[counter].origin, cRaysDepth[counter].depth, cRaysDepth[counter].direction, cRaysDepth[counter].index, cRaysDepth[counter].bounces, cRaysDepth[counter].geomType };
}
__device__ bool grabRay() {
cuda_atomic<int32_t> atomicCounter(cRayCounter);
int32_t counter = atomicCounter.add(1);
if (counter >= cNumRays) return false;
update_ray(counter);
return true;
}
__device__ bool grabIDRay() {
cuda_atomic<int32_t> atomicCounter(cRayCounter);
int32_t counter = atomicCounter.add(1);
if (counter >= cNumRays) return false;
updateIDRay(counter);
return true;
}
}
namespace render{
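// rayScheduler is a persistent-threads path tracer: each thread keeps exactly one ray resident in
// shared memory (the SMRAY_* macros), intersects it against the fluid grid and the analytic
// spheres, adds any emission into its pixel atomically, then samples a diffuse bounce. When a ray
// misses, reaches the bounce limit, or terminates, the GET_NEXT_RAY macro (defined earlier) moves
// on to the next queued ray via the scheduler helpers above.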
__global__ void rayScheduler() {
//const int32_t tIdx = threadIdx.x;
//const int32_t gIdx = threadIdx.x + blockIdx.x * blockDim.x;
__syncwarp();
if (threadIdx.x + blockIdx.x * blockDim.x >= cNumRays) return;
__syncwarp();
scheduler::update_ray(threadIdx.x + blockIdx.x * blockDim.x);
__syncwarp();
while (1) {
__syncwarp();
auto fluidHit = fluid::rayIntersectFluidLeanSMRAY();
__syncwarp();
float3 normal, color;
if (fluidHit.x != INT_MAX) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
//auto hitPosition = SMRAY_ORIG + fluidHit * SMRAY_DIR;
//int3 vIdx = position_to_idx3D_i(fluidMemory.position[fluidHit], fluidMemory.min_coord, cs);
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit) * cs;
auto rH = aabb::rayIntersectAABBSM(min, min + cs);
auto t = rH.tmin;
if (t < SMRAY_DEPTH) {
SMRAY_DEPTH = t;
auto hitPosition = SMRAY_ORIG + rH.tmin*1.01f * SMRAY_DIR;
auto c = min + cs * 0.5f;
auto prel = hitPosition - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
auto n = math::castTo<float3>(nc);
//float3 n{ 1.f,0.f,0.f };
auto nl = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
normal = nl;
color = f;
}
}
__syncwarp();
int32_t idx = -1;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersectSM();
if (d && d < SMRAY_DEPTH) {
SMRAY_DEPTH = d;
auto x = SMRAY_ORIG + SMRAY_DIR * SMRAY_DEPTH;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
normal = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
color = sphere.col;
idx = sphere_id;
}
}
if (idx != -1 && math::length3(spheres[idx].emi) > 0.1f) {
cuda_atomic<float3> atomicColor(&cImage[SMRAY_IDX].color);
atomicColor.x += SMRAY_MASK_X * spheres[idx].emi.x;
atomicColor.y += SMRAY_MASK_Y * spheres[idx].emi.y;
atomicColor.z += SMRAY_MASK_Z * spheres[idx].emi.z;
//GET_NEXT_RAY;
}
__syncwarp();
if (SMRAY_DEPTH > 1e19f) GET_NEXT_RAY;
//cuda_atomic<float3> atomicColor(&cImage[SMRAY_IDX].color);
//atomicColor.x += randf();
//atomicColor.y += randf();
//atomicColor.z += randf();
//GET_NEXT_RAY;
SMRAY_MASK *= color;
auto position = SMRAY_ORIG + SMRAY_DIR * SMRAY_DEPTH;
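// Cosine-weighted hemisphere sampling about the shading normal: with (u, v, w) an orthonormal
// basis around w = normal, the direction u*cos(phi)*sqrt(r2) + v*sin(phi)*sqrt(r2) + w*sqrt(1-r2)
// has pdf cos(theta)/pi, which cancels the cosine factor of the diffuse BRDF, so the throughput
// only needs to be multiplied by the albedo.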
//float phi = 2 * CUDART_PI_F * curand_uniform(cRandStates + threadIdx.x + blockIdx.x * blockDim.x);
float phi = 2 * CUDART_PI_F * randf();
//float r2 = curand_uniform(cRandStates + threadIdx.x + blockIdx.x * blockDim.x);
float r2 = randf();
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
//SMRAY_MASK_X *= color.x;
//SMRAY_MASK_Y *= color.y;
//SMRAY_MASK_Z *= color.z;
SMRAY_BOUNCES++;
if (SMRAY_BOUNCES == 5)
GET_NEXT_RAY;
SMRAY_ORIG = position + w * 0.01f;
//SMRAY_ORIG_X = position.x + w.x * 0.01f;
//SMRAY_ORIG_Y = position.y + w.y * 0.01f;
//SMRAY_ORIG_Z = position.z + w.z * 0.01f;
SMRAY_DIR = dw;
//SMRAY_DIR_X = dw.x;
//SMRAY_DIR_Y = dw.y;
//SMRAY_DIR_Z = dw.z;
SMRAY_DEPTH = 1e21f;
}
}
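// IDrayScheduler only resolves the fluid intersection for each queued ray: it stores the entry
// depth and face normal of the first occupied voxel (or FLT_MAX on a miss) into
// cFluidIntersections. Sphere intersection and shading happen later in intersectRaysSpheresScene.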
__global__ void IDrayScheduler() {
if (threadIdx.x + blockIdx.x * blockDim.x >= cNumRays) return;
cuda_atomic<int32_t> atomicCounter(cRayCounter);
auto counter = threadIdx.x + blockIdx.x * blockDim.x;
IDRAY = cRaysDepth[counter];
while (1) {
auto fluidHit = fluid::rayIntersectFluidLeanIDRAY();
if (fluidHit.x != INT_MAX) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit) * cs;
auto rH = aabb::rayIntersectAABBID(min, min + cs);
auto t = rH.tmin;
if (t < IDRAY_DEPTH) {
IDRAY_DEPTH = t;
auto hitPosition = IDRAY_ORIG + rH.tmin * IDRAY_DIR;
auto c = min + cs * 0.5f;
auto prel = hitPosition - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
auto n = math::castTo<float3>(nc);
//float3 n{ 1.f,0.f,0.f };
auto nl = math::dot(n, IDRAY_DIR) < 0 ? n : n * -1;
//auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
cFluidIntersections[counter].depth = rH.tmin;
cFluidIntersections[counter].normal = n;
//cRaysDepth[counter].depth = rH.tmin;
//cRaysDepth[counter].geomType = MAX_VAL_04BIT;
}
else {
cFluidIntersections[counter].depth = FLT_MAX;
}
}
else {
cFluidIntersections[counter].depth = FLT_MAX;
}
counter = atomicCounter.add(1);
if (counter >= cNumRays) return;
IDRAY = cRaysDepth[counter];
}
}
__global__ void intersectAndShadeRaysSM(Ray *rays, Pixel *image, int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
int32_t ii = threadIdx.x + blockDim.x * threadIdx.y;
float scene_t = 1e20f;
raySM[ii] = rays[i];
auto fluidHit = fluid::rayIntersectFluidLeanSM();
float3 normal, color, emission;
if (fluidHit.depth < FLT_MAX) {
constexpr auto epsilon = 1e-1f;
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(fluidHit.voxel) * fluidMemory.cell_size;
float3 max = fluidSystem.bounds[0] + (math::castTo<float3>(fluidHit.voxel) + 1.f) * fluidMemory.cell_size;
auto rH = aabb::rayIntersectAABB(raySM[ii], min, max);
auto hitPosition = raySM[ii].orig + rH.tmin * raySM[ii].dir;
auto c = (min + max) * 0.5f;
auto prel = hitPosition - c;
auto d = math::abs((min - max) * 0.5f);
auto n = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(n.x), static_cast<char>(n.y), static_cast<char>(n.z) };
auto t = rH.tmin;
if (t < scene_t) {
scene_t = t;
auto n = math::castTo<float3>(nc);
auto nl = math::dot(n, raySM[ii].dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
// rayi[ii] = RayIntersection{ fluidHit.depth, DIFF, f, nl, emit };
normal = nl;
emission = emit;
color = f;
}
}
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(raySM[ii]);
if (d && d < scene_t) {
scene_t = d;
auto x = raySM[ii].orig + raySM[ii].dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
normal = math::dot(n, raySM[ii].dir) < 0 ? n : n * -1;
color = sphere.col;
emission = sphere.emi;
// rayi[ii] = RayIntersection{ d, sphere.refl, sphere.col, nl, sphere.emi };
}
}
if (scene_t > 1e19f)
return;
// rayi[ii] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
curandState randState;
curand_init(seed + i, 0, 0, &randState);
auto pixel = image[i];
// auto worldRay = rays[i];
auto position = raySM[ii].orig + raySM[ii].dir * scene_t;
pixel.color += (pixel.mask * emission);
float phi = 2 * CUDART_PI_F * curand_uniform(&randState);
float r2 = curand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
pixel.mask *= color;
raySM[ii].orig = position + w * 0.01f;
raySM[ii].dir = dw;
rays[i] = raySM[ii];
image[i] = pixel;
//#undef i
//#undef ii
}
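// Wavefront variant: intersectRays only records the nearest hit (depth, material, colour, normal,
// emission) per pixel into the RayIntersection buffer; the actual bounce happens in a separate
// shading pass (shadeRays, further below).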
__global__ void intersectRays(Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
auto worldRay = rays[i];
float scene_t = 1e20f;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(worldRay);
if (d && d < scene_t) {
scene_t = d;
auto x = worldRay.orig + worldRay.dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
intersections[i] = RayIntersection{ d, sphere.refl, sphere.col, n, sphere.emi };
}
}
auto fluidHit = fluid::rayIntersectFluid(worldRay);
if (fluidHit.status && fluidHit.depth < scene_t) {
scene_t = fluidHit.depth;
auto n = fluidHit.normal;
//return n;
//auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
intersections[i] = RayIntersection{ fluidHit.depth, DIFF, f, n, emit };
}
auto dw = worldRay.dir;
if (dw.x != dw.x || dw.y != dw.y || dw.z != dw.z || (dw.x == 0.f && dw.y == 0.f && dw.z == 0.f)) {
//printf("x[%f %f %f] : [%f %f %f]\n", worldRay.orig.x, worldRay.orig.y, worldRay.orig.z, worldRay.dir.x, worldRay.dir.y, worldRay.dir.z);
}
if (scene_t > 1e19f) {
//printf("y[%f %f %f] : [%f %f %f]\n", worldRay.orig.x, worldRay.orig.y, worldRay.orig.z, worldRay.dir.x, worldRay.dir.y, worldRay.dir.z);
intersections[i] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
}
}
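// intersectRaysSpheresScene consumes the fluid hits produced by IDrayScheduler: for every msaa
// sub-ray of its pixel it takes the stored fluid depth/normal, tests the analytic spheres on top,
// performs the diffuse bounce in place and writes the advanced ray back to cRaysDepth.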
__global__ void intersectRaysSpheresScene(int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
for (int32_t ii = 0; ii < cMsaaRate; ++ii) {
int32_t idx = i * cMsaaRate + ii;
IDRAY2D = cRaysDepth[idx];
float3 normal, emission, color, x;
if (cFluidIntersections[idx].depth < 1e19f) {
normal = cFluidIntersections[idx].normal;
IDRAY2D.depth = cFluidIntersections[idx].depth;
x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
color = f;
emission = float3{ 0.f,0.f,0.f };
}
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
//Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersectID2D();
if (d && d < IDRAY2D.depth) {
IDRAY2D.depth = d;
IDRAY2D.geomType = sphere_id;
x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
Sphere &sphere = spheres[IDRAY2D.geomType];
normal = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
color = sphere.col;
emission = sphere.emi;
}
}
normal = math::dot(normal, IDRAY2D.direction) < 0 ? normal : normal * -1;
curandState randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
curand_init(seed + threadId, 0, 0, &randState);
//float phi = 2 * CUDART_PI_F * randf();
//float r2 = randf();
float phi = 2 * CUDART_PI_F * curand_uniform(&randState);
float r2 = curand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
cImage[IDRAY2D.index].color += cImage[IDRAY2D.index].mask * emission;
cImage[IDRAY2D.index].mask *= color;
IDRAY2D.origin = x + w * 0.01f;
IDRAY2D.direction = dw;
//if (IDRAY2D.bounces == 0) { cImage[IDRAY2D.index].color = float3{ d * 0.005f, d*0.005f, d * 0.005f }; }
IDRAY2D.depth = FLT_MAX;
IDRAY2D.geomType = 0;
IDRAY2D.bounces++;
cRaysDepth[i * cMsaaRate + ii] = IDRAY2D;
}
}
__global__ void updateIDRAY(int32_t seed) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
for (int32_t ii = 0; ii < cMsaaRate; ++ii) {
int32_t idx = i * cMsaaRate + ii;
IDRAY2D = cRaysDepth[idx];
float3 normal, emission, color;
auto d = IDRAY2D.depth;
auto x = IDRAY2D.origin + IDRAY2D.direction * IDRAY2D.depth;
if (IDRAY2D.depth < 1e19f) {
if (IDRAY2D.geomType == MAX_VAL_04BIT) {
constexpr auto epsilon = 1e-1f;
const auto cs = fluidMemory.cell_size.x;
//auto hitPosition = SMRAY_ORIG + fluidHit * SMRAY_DIR;
//int3 vIdx = position_to_idx3D_i(fluidMemory.position[fluidHit], fluidMemory.min_coord, cs);
auto voxelIdx = position_to_idx3D_i(x, fluidMemory.min_coord, fluidMemory.cell_size.x);
float3 min = fluidSystem.bounds[0] + math::castTo<float3>(voxelIdx) * cs;
auto c = min + cs * 0.5f;
auto prel = x - c;
auto d = cs * 0.5f;
auto ni = math::castTo<int3>(prel / d * (1.f + epsilon));
auto nc = char3{ static_cast<char>(ni.x), static_cast<char>(ni.y), static_cast<char>(ni.z) };
normal = math::castTo<float3>(nc);
//auto nl = math::dot(n, SMRAY_DIR) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
//normal = nl;
color = f;
emission = float3{ 0.f,0.f,0.f };
}
else
{
Sphere &sphere = spheres[IDRAY2D.geomType];
normal = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
color = sphere.col;
emission = sphere.emi;
}
normal = math::dot(normal, IDRAY2D.direction) < 0 ? normal : normal * -1;
curandState randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
curand_init(seed + threadId, 0, 0, &randState);
//float phi = 2 * CUDART_PI_F * randf();
//float r2 = randf();
float phi = 2 * CUDART_PI_F * curand_uniform(&randState);
float r2 = curand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(normal);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
auto dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
cImage[IDRAY2D.index].color += cImage[IDRAY2D.index].mask * emission;
cImage[IDRAY2D.index].mask *= color;
IDRAY2D.origin = x + w * 0.01f;
IDRAY2D.direction = dw;
}
if (IDRAY2D.bounces == 0) { cImage[IDRAY2D.index].color = float3{ d * 0.005f, d*0.005f, d * 0.005f }; }
IDRAY2D.depth = FLT_MAX;
IDRAY2D.geomType = 0;
IDRAY2D.bounces++;
cRaysDepth[i * cMsaaRate + ii] = IDRAY2D;
}
}
__global__ void intersectRaysBVH(Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
auto worldRay = rays[i];
float scene_t = 1e20f;
for (int32_t sphere_id = 0; sphere_id < int32_t(sizeof(spheres) / sizeof(Sphere)); sphere_id++) {
Sphere &sphere = spheres[sphere_id];
float d = spheres[sphere_id].intersect(worldRay);
if (d && d < scene_t) {
scene_t = d;
auto x = worldRay.orig + worldRay.dir * scene_t;
auto n = math::normalize(float3{ x.x - sphere.pos.x, x.y - sphere.pos.y, x.z - sphere.pos.z });
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
intersections[i] = RayIntersection{ d, sphere.refl, sphere.col, nl, sphere.emi };
}
}
auto fluidHit = bvh::rayIntersectFluidBVH(worldRay);
if (fluidHit.status && fluidHit.depth < scene_t) {
scene_t = fluidHit.depth;
auto n = fluidHit.normal;
//return n;
auto nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
auto f = float3{ 0.05098f, 0.23137f, 0.494177f };
auto emit = float3{ 0.f, 0.f, 0.f };
intersections[i] = RayIntersection{ fluidHit.depth, DIFF, f, nl, emit };
}
if (scene_t > 1e19f)
intersections[i] = RayIntersection{ 1e20f, DIFF, float3{0.f,0.f,0.f}, float3{1.f,0.f,0.f}, float3{0.f,0.f,0.f} };
}
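// shadeRays performs one diffuse bounce per pixel: accumulate the hit's emission weighted by the
// current throughput mask, multiply the mask by the surface colour, sample a cosine-weighted
// direction around the normal, and offset the new origin by 0.01 along the normal to avoid
// self-intersection.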
__global__ void shadeRays(int32_t seed, Pixel* image, Ray* rays, RayIntersection* intersections) {
int32_t x = blockIdx.x * blockDim.x + threadIdx.x;
int32_t y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= cScene.width)
return;
if (y >= cScene.height)
return;
int32_t i = (cScene.height - y - 1) * cScene.width + x;
curandState randState;
int32_t threadId =
(blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
curand_init(seed + threadId, 0, 0, &randState);
auto worldRay = rays[i];
auto inter = intersections[i];
auto pixel = image[i];
if (inter.depth >= 1e18f)
return;
auto position = worldRay.orig + worldRay.dir * inter.depth;
pixel.color += (pixel.mask * inter.emission);
//pixel.color = inter.surfaceNormal;
//pixel.color = worldRay.dir;
float3 n = math::normalize(inter.surfaceNormal);
float3 nl = math::dot(n, worldRay.dir) < 0 ? n : n * -1;
float phi = 2 * CUDART_PI_F * curand_uniform(&randState);
float r2 = curand_uniform(&randState);
float r2s = sqrtf(r2);
float3 w = math::normalize(nl);
float3 u = math::normalize(math::cross((fabs(w.x) > .1f ? float3{ 0, 1, 0 } : float3{ 1, 0, 0 }), w));
float3 v = math::cross(w, u);
float3 dw = math::normalize(u * cosf(phi) * r2s + v * sinf(phi) * r2s + w * sqrtf(1 - r2));
worldRay.orig = position + w * 0.01f;
pixel.mask *= inter.surfaceColor;
//dw = rayInWorldSpace - n * 2.0f * math::dot(n, rayInWorldSpace);
//worldRay.orig = position + nl * bias;
// worldRay.orig = position + w * 1.5f;
worldRay.dir = dw;
if (dw.x != dw.x || dw.y != dw.y || dw.z != dw.z || (dw.x == 0.f && dw.y == 0.f && dw.z == 0.f))
worldRay.dir = float3{ 1,0,0 };
rays[i] = worldRay;
image[i] = pixel;
}
}
}
#ifdef __INTELLISENSE__
#define CPSYMBOL(symbol, var)
#define LAUNCH(kernel, blocks, tpb, sm, stream) kernel
#else
#define CPSYMBOL(symbol, var) cudaMemcpyToSymbol(symbol, &var, sizeof(symbol))
#define LAUNCH(kernel, blocks, tpb, sm, stream) kernel<<<blocks,tpb,sm,stream>>>
#endif
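// Host driver. The CPSYMBOL/LAUNCH macros above only exist so that Intellisense, which cannot
// parse <<<>>> launches, still accepts the file. cudaMLMRender allocates its persistent buffers on
// first use, copies the per-frame scene and fluid state to device symbols via cudaMemcpyToSymbol,
// then runs generatePrimaryRays followed by `bounces` intersect/shade passes and a final toneMap.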
void cudaMLMRender(SceneInformation scene, cudaGraphicsResource_t resource, FluidMemory fmem, FluidSystem fsys, float3 *acc, unsigned framenumber,
unsigned hashedframes, int32_t renderMode) {
using namespace rtx;
static std::random_device rd;
static std::uniform_int_distribution<int32_t> dist(INT_MIN, INT_MAX);
static bool once = true;
static cudaStream_t stream;
constexpr auto msaa = 1;
constexpr int32_t blocks_1080 = 32 * 20;
constexpr int32_t blockSize_1080 = 64;
constexpr auto bounces = 5;
int32_t width = static_cast<int32_t>(scene.width);
int32_t height = static_cast<int32_t>(scene.height);
int32_t numRays = width * height * msaa;
//int32_t num_blocks = blocks_1080 * blockSize_1080;
if (once) {
std::cout << "MLM renderer built at " << __TIMESTAMP__ << std::endl;
cudaStreamCreate(&stream);
cudaMalloc(&cuImage, sizeof(Pixel) * width * height);
cudaMalloc(&cuCurrentRays, sizeof(Ray) * width * height);
cudaMalloc(&cuRayIntersections, sizeof(RayIntersection) * width * height);
cudaMalloc(&cuScheduledRays, sizeof(RayWithIndex) * width * height * msaa);
cudaMalloc(&cuBlockedRays, sizeof(RayWithIndexAndDepth) * width * height * msaa);
cudaMalloc(&cuFluidIntersections, sizeof(FluidIntersection) * width * height * msaa);
cudaMalloc(&rayCounter, sizeof(int32_t));
cudaMalloc(&cRNGSeeds, sizeof(uint32_t) * blocks_1080 * blockSize_1080);
cudaMalloc(&cuRandStates, sizeof(curandState) * blocks_1080 * blockSize_1080);
//initRNG <<<blocks_1080, blockSize_1080>>> (cuRandStates, dist(rd));
LAUNCH(common::initRNGSeeds, blocks_1080, blockSize_1080, 0 ,0)(cRNGSeeds, dist(rd));
cudaArray_t color_arr;
cudaGraphicsMapResources(1, &resource, 0);
cudaGraphicsSubResourceGetMappedArray(&color_arr, resource, 0, 0);
cudaBindSurfaceToArray(surfaceWriteOut, color_arr);
once = false;
}
//scene.m_camera.apertureRadius = 0.f;
CPSYMBOL(cScene, scene);
CPSYMBOL(fluidSystem, fsys);
CPSYMBOL(fluidMemory, fmem);
CPSYMBOL(cNumRays, numRays);
CPSYMBOL(cRays, cuScheduledRays);
CPSYMBOL(cRaysDepth, cuBlockedRays);
CPSYMBOL(cImage, cuImage);
CPSYMBOL(cRandStates, cuRandStates);
CPSYMBOL(cFluidIntersections, cuFluidIntersections);
CPSYMBOL(cMsaaRate, msaa);
CPSYMBOL(cRayCounter, rayCounter);
CPSYMBOL(cuSeeds, cRNGSeeds);
dim3 texturedim((uint32_t)scene.width, (uint32_t)scene.height, 1);
dim3 blockdim(8, 8, 1);
dim3 griddim(texturedim.x / blockdim.x, texturedim.y / blockdim.y, 1);
if (texturedim.x % blockdim.x != 0)
griddim.x += 1;
if (texturedim.y % blockdim.y != 0)
griddim.y += 1;
//CoreLoopPathTracingKernel<<<griddim, blockdim>>>((float3 *)acc, framenumber, hashedframes);
//if (renderMode == 0) {
// LAUNCH(common::generateScheduledRays, griddim, dim3(msaa, blockdim.x, blockdim.y), 0, stream)(hashedframes, cuImage, cuScheduledRays, cuCurrentRays, msaa);
// cudaMemcpy(rayCounter, &num_blocks, sizeof(int32_t), cudaMemcpyHostToDevice);
// render::rayScheduler << <blocks_1080, blockSize_1080, sizeof(ScheduledRay) * blockSize_1080, stream >> >();
// common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, (float)msaa);
//}
//else if (renderMode == 4) {
//cuda::sync("Test 1");
// LAUNCH(common::generateBlockedRays, griddim, dim3(msaa, blockdim.x, blockdim.y), 0, stream)(hashedframes, cuImage, cuBlockedRays, cuCurrentRays, msaa);
//// cuda::sync("Test 2");
// for (int32_t i = 0; i < bounces; ++i) {
// //std::cout << "Bounce: " << i << std::endl;
// cudaMemcpyAsync(rayCounter, &num_blocks, sizeof(int32_t), cudaMemcpyHostToDevice, stream);
// // cuda::sync("Test 3");
// render::IDrayScheduler << <blocks_1080, blockSize_1080, sizeof(RayWithIndexAndDepth) * blockSize_1080, stream >> > ();
// //cuda::sync("Test 4");
// render::intersectRaysSpheresScene << <griddim, blockdim, blockdim.x * blockdim.y * sizeof(RayWithIndexAndDepth), stream >>> (dist(rd));
// //cuda::sync("Test 5");
// //render::updateIDRAY << <griddim, blockdim, blockdim.x * blockdim.y * sizeof(RayWithIndexAndDepth), stream >> > (dist(rd));
// // cuda::sync("Test 6");
// }
// common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, 1.f);
// //cuda::sync("Test 7");
// }
//else{
common::generatePrimaryRays << <griddim, blockdim, 0, stream >> > (hashedframes, cuImage, cuCurrentRays);
for (int32_t i = 0; i < bounces; ++i) {
std::cout << i;
std::cout.flush();
cuda::sync(std::to_string(__LINE__));
//if (renderMode == 1) {
// render::intersectAndShadeRaysSM<<<griddim, blockdim, sizeof(RayIntersection) * blockdim.x * blockdim.y, stream>>>(
// cuCurrentRays, cuImage, hashedframes);
//} else if (renderMode == 2) {
if (renderMode == 3) {
render::intersectRaysBVH<<<griddim, blockdim>>>(cuCurrentRays, cuRayIntersections);
render::shadeRays<<<griddim, blockdim>>>(dist(rd), cuImage, cuCurrentRays, cuRayIntersections);
}
else {
// std::cout << ".\n";
render::intersectRays << <griddim, blockdim >> > (cuCurrentRays, cuRayIntersections);
cuda::sync(std::to_string(__LINE__));
//std::cout << "-\n";
render::shadeRays << <griddim, blockdim >> > (dist(rd), cuImage, cuCurrentRays, cuRayIntersections);
cuda::sync(std::to_string(__LINE__));
//std::cout << ":\n";
}
//break;
// } else
}
std::cout << std::endl;
common::toneMap << <griddim, blockdim, 0, stream >> > (framenumber, (float3*)acc, cuImage, 1.f);
cuda::sync(std::to_string(__LINE__));
//}
cudaStreamSynchronize(stream);
} |
8d364a646fcda6147b4a382512400db0b92d9165.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#define FETCH_SIZE 4096
__shared__ float s_vector[FETCH_SIZE];
#define ACCUM_N 512
__shared__ volatile float s_reduction[ACCUM_N];
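// inner_product_simple: every thread accumulates a strided partial dot product and combines the
// partials with a single atomicAdd into *res; the surrounding __syncthreads make the zero
// initialisation and the final sum visible to all threads of the block.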
__device__ void inner_product_simple( float* a, float* b, int size,float* res)
{
float tmp=0;
__syncthreads();
if (threadIdx.x==0) {
*res=0;
}
__syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=(a[i]*b[i]);
}
atomicAdd((float*)res,tmp);
__syncthreads();
}
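// inner_product: block-wide dot product using a shared-memory tree reduction. Each thread writes
// its strided partial sum into s_reduction, the stride is halved down to 32 with __syncthreads in
// between, and the last warp finishes without barriers, relying on the volatile qualifier and the
// (pre-Volta) warp-synchronous execution model. Thread 0's slot holds the result.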
__device__ float inner_product( float* a, float* b, int size)
{
float tmp=0;
// __syncthreads();
// if (threadIdx.x==0) {
// *res=0;
// }
// __syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=a[i]*b[i];
}
s_reduction[threadIdx.x]=tmp;
__syncthreads();
for (int stride = ACCUM_N / 2; stride > 32; stride >>= 1)
{
if(threadIdx.x<stride) s_reduction[threadIdx.x] += s_reduction[stride + threadIdx.x];
__syncthreads();
}
for (int stride = 32; stride > 0 && threadIdx.x<32 ; stride >>=1 )
{
if(threadIdx.x<stride) s_reduction[threadIdx.x] += s_reduction[stride + threadIdx.x];
}
__syncthreads();
return s_reduction[0];
}
__device__ void prefetch(float* s_v, float* v,int size){
for(int i=threadIdx.x;i<size;i+=blockDim.x)
{
s_v[i]=v[i];
}
}
__shared__ float tmp_res_simple;
__global__ void bigmatrix_nofiles_simple(float* m, float *v, float* o,int out_offset, int m_size,int v_size)
{
int per_block=m_size/gridDim.x;
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
inner_product_simple(v,m+out, v_size,&tmp_res_simple);
if(threadIdx.x==0)
{
(*(o+out_offset + (out/v_size)))=tmp_res_simple;
}
__syncthreads();
}
}
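// bigmatrix_nofiles: tiled matrix-vector product. The vector is staged into shared memory
// FETCH_SIZE elements at a time (prefetch), each block owns a contiguous slice of the matrix rows,
// and the per-tile partial dot products are accumulated into the output element across tiles.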
__global__ void bigmatrix_nofiles(float* m, float *v, float* o,int out_offset, int m_size, int v_size)
{
int per_block=m_size/gridDim.x;
//for (int i=0;i<10000;i++){
for (int tile=0;tile<v_size;tile+=FETCH_SIZE){
prefetch(s_vector,v+tile,FETCH_SIZE);
__syncthreads();
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
float tmp_res=inner_product(s_vector,m+out+tile, FETCH_SIZE);
if(threadIdx.x==0)
{
if (tile==0) (*(o+out_offset + (out/v_size)))=tmp_res;
else (*(o+out_offset + (out/v_size)))+=tmp_res;
}
__syncthreads();
}
}
//}
}
void init_device_app(){
// CUDA_SAFE_CALL(hipDeviceSetLimit(hipLimitMallocHeapSize,1<<30));
}
#include <sys/time.h>
double _timestamp(){
struct timeval tv;
gettimeofday(&tv,0);
return 1e6*tv.tv_sec+tv.tv_usec;
}
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
void* open_map_file(const char* f, int* fd, size_t* size, int type)
{
int open_fd=open(f,type==O_RDONLY?type:type|O_CREAT|O_TRUNC,S_IRUSR|S_IWUSR);
if (open_fd<0){
perror("open failed");
return NULL;
}
if (type!=O_RDONLY) {
assert(*size>0);
if (ftruncate(open_fd,*size)){
perror("ftrunc failed");
return NULL;
}
}
struct stat s;
if (fstat(open_fd,&s)) {
fprintf(stderr,"Problem with fstat the file on CPU: %s \n ",strerror(errno));
}
if (s.st_size==0) {
fprintf(stderr,"file with zero lenght, skipping %s\n",f);
close(open_fd);
return NULL;
}
	void* data=mmap(NULL,s.st_size,type==O_RDONLY?PROT_READ:PROT_READ|PROT_WRITE,MAP_POPULATE|(type==O_RDONLY?MAP_PRIVATE:MAP_SHARED),open_fd,0); // O_RDONLY is 0, so the flag value itself cannot be used as the condition
if (data==MAP_FAILED) {
perror("mmap");
close(open_fd);
return NULL;
}
*fd=open_fd;
*size=s.st_size;
return data;
}
#define CUDA_SAFE_CALL(x) if((x)!=hipSuccess) { fprintf(stderr,"CUDA ERROR %s: %d %s\n",__FILE__, __LINE__, hipGetErrorString(hipGetLastError())); exit(-1); }
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "timer.h"
//DEBUG
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
void unmap_close_file(int fd, void* ptr,size_t len, const char* what)
{
if(munmap(ptr,len)) { perror("unmap"); fprintf(stderr,"%s\n",what); return;}
close(fd);
}
#include <assert.h>
// size of the output used for data staging
int main( int argc, char** argv)
{
if(argc<4) {
fprintf(stderr," <vector> <matrix> <output>\n\n");
return -1;
}
double total_time=0;
int nblocks=28;//56;
int nthreads=256;
int trials =5;
for(int t=0;t<trials+1;t++){
int fd_m;
size_t size_m;
char* h_matrix=(char*)open_map_file(argv[2], &fd_m, &size_m, O_RDONLY);
assert(h_matrix);
float* h_d_matrix[2];
float* d_matrix[2];
int data_per_chunk=size_m/4;
assert(size_m%data_per_chunk==0);
printf("Data per chunk: %d\n",data_per_chunk);
assert(data_per_chunk%sizeof(float)==0);
CUDA_SAFE_CALL(hipHostMalloc(&h_d_matrix[0],data_per_chunk, hipHostMallocDefault));
CUDA_SAFE_CALL(hipHostMalloc(&h_d_matrix[1],data_per_chunk, hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc(&d_matrix[0],data_per_chunk));
CUDA_SAFE_CALL(hipMalloc(&d_matrix[1],data_per_chunk));
int fd_v;
size_t size_v;
char* h_vector=(char*)open_map_file(argv[1],&fd_v,&size_v,O_RDONLY);
assert(h_vector);
float* h_d_vector;
float* d_vector;
CUDA_SAFE_CALL(hipHostMalloc(&h_d_vector,size_v, hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc(&d_vector,size_v));
assert(data_per_chunk/size_v/nblocks>0);
assert((data_per_chunk/size_v)%nblocks==0);
printf("Running with %d blocks, %d threads, %d vals per block\n",nblocks, nthreads,(data_per_chunk/size_v)/nblocks );
int fd_v_out;
size_t size_v_out=size_m/size_v*sizeof(float);
assert(size_v_out);
char* h_v_out=(char*)open_map_file(argv[3], &fd_v_out, &size_v_out, O_RDWR);
assert(h_v_out);
float* h_d_v_out;
float* d_v_out;
CUDA_SAFE_CALL(hipHostMalloc(&h_d_v_out,size_v_out, hipHostMallocDefault));
CUDA_SAFE_CALL(hipMalloc(&d_v_out,size_v_out));
fprintf(stderr,"using: %s for matrix of size %lu, %s for vector of size %lu, %s for output of size %lu, data per chunk %lu\n",
argv[2], size_m,argv[1],size_v,argv[3],size_v_out,data_per_chunk);
hipStream_t s[2];
CUDA_SAFE_CALL(hipStreamCreate(&s[0]));
CUDA_SAFE_CALL(hipStreamCreate(&s[1]));
double time_before=_timestamp();
if (t==0) time_before=0;
int c=0;
memcpy(h_d_vector,h_vector,size_v);
CUDA_SAFE_CALL(hipMemcpy(d_vector,h_d_vector,size_v,hipMemcpyHostToDevice));
if (t==0) time_before=0;
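	// Stream the matrix in data_per_chunk pieces through two pinned staging buffers and two
	// streams: while the async copy and kernel for chunk c are in flight, the host memcpy for the
	// next chunk fills the other buffer; c ^= 1 flips between the buffers each iteration.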
for(size_t i=0 ;i<size_m;i+=data_per_chunk)
{
fprintf(stderr,"chunk %lu\n",i);
CUDA_SAFE_CALL(hipStreamSynchronize(s[c]));
// total_mem_time+=_timestamp()-time_before_mem;;
// time_before_mem=_timestamp();
//
// size_t num_read=pread(fd_m,h_d_matrix[c],data_per_chunk,i);
// assert(num_read==data_per_chunk);
memcpy(h_d_matrix[c],h_matrix+i,data_per_chunk);
CUDA_SAFE_CALL(hipMemcpyAsync(d_matrix[c],h_d_matrix[c],data_per_chunk,hipMemcpyHostToDevice,s[c]));
hipLaunchKernelGGL(( bigmatrix_nofiles_simple), dim3(nblocks),dim3(nthreads),0,s[c], d_matrix[c],d_vector,d_v_out,i/size_v,
data_per_chunk/(sizeof(float)), size_v/(sizeof(float)));
c=c^0x1;
}
//time_before_mem=_timestamp();
//CUDA_SAFE_CALL(hipDeviceSynchronize());
CUDA_SAFE_CALL(hipMemcpy(h_d_v_out,d_v_out,size_v_out,hipMemcpyDeviceToHost));
memcpy(h_v_out,h_d_v_out,size_v_out);
double time_after=_timestamp();
total_time+=(time_after-time_before);
if(!t) total_time=0;
unmap_close_file(fd_v_out,h_v_out,size_v_out,"out");
unmap_close_file(fd_m,h_matrix,size_m,"matrix");
unmap_close_file(fd_v,h_vector,size_v,"vector");
if (t) fprintf(stderr,"total time %.0f us %.3f GB \n ",total_time/t, t*(size_v+size_m+size_v_out)/total_time/1000);
hipHostFree(h_d_v_out);
hipHostFree(h_d_vector);
	hipHostFree(h_d_matrix[0]);
	hipHostFree(h_d_matrix[1]);
hipFree(d_v_out);
hipFree(d_vector);
	hipFree(d_matrix[0]);
	hipFree(d_matrix[1]);
hipError_t error = hipDeviceSynchronize();
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != hipSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", hipGetErrorString(error));
}
// hipFree(d_output);
hipDeviceReset();
if(error) break;
}
return 0;
}
| 8d364a646fcda6147b4a382512400db0b92d9165.cu | /*
 * This experimental software is provided AS IS.
* Feel free to use/modify/distribute,
* If used, please retain this disclaimer and cite
* "GPUfs: Integrating a file system with GPUs",
* M Silberstein,B Ford,I Keidar,E Witchel
* ASPLOS13, March 2013, Houston,USA
*/
#define FETCH_SIZE 4096
__shared__ float s_vector[FETCH_SIZE];
#define ACCUM_N 512
__shared__ volatile float s_reduction[ACCUM_N];
__device__ void inner_product_simple( float* a, float* b, int size,float* res)
{
float tmp=0;
__syncthreads();
if (threadIdx.x==0) {
*res=0;
}
__syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=(a[i]*b[i]);
}
atomicAdd((float*)res,tmp);
__syncthreads();
}
__device__ float inner_product( float* a, float* b, int size)
{
float tmp=0;
// __syncthreads();
// if (threadIdx.x==0) {
// *res=0;
// }
// __syncthreads();
int i=0;
for( i=threadIdx.x;i<size;i+=blockDim.x){
tmp+=a[i]*b[i];
}
s_reduction[threadIdx.x]=tmp;
__syncthreads();
for (int stride = ACCUM_N / 2; stride > 32; stride >>= 1)
{
if(threadIdx.x<stride) s_reduction[threadIdx.x] += s_reduction[stride + threadIdx.x];
__syncthreads();
}
for (int stride = 32; stride > 0 && threadIdx.x<32 ; stride >>=1 )
{
if(threadIdx.x<stride) s_reduction[threadIdx.x] += s_reduction[stride + threadIdx.x];
}
__syncthreads();
return s_reduction[0];
}
__device__ void prefetch(float* s_v, float* v,int size){
for(int i=threadIdx.x;i<size;i+=blockDim.x)
{
s_v[i]=v[i];
}
}
__shared__ float tmp_res_simple;
__global__ void bigmatrix_nofiles_simple(float* m, float *v, float* o,int out_offset, int m_size,int v_size)
{
int per_block=m_size/gridDim.x;
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
inner_product_simple(v,m+out, v_size,&tmp_res_simple);
if(threadIdx.x==0)
{
(*(o+out_offset + (out/v_size)))=tmp_res_simple;
}
__syncthreads();
}
}
__global__ void bigmatrix_nofiles(float* m, float *v, float* o,int out_offset, int m_size, int v_size)
{
int per_block=m_size/gridDim.x;
//for (int i=0;i<10000;i++){
for (int tile=0;tile<v_size;tile+=FETCH_SIZE){
prefetch(s_vector,v+tile,FETCH_SIZE);
__syncthreads();
for(size_t out=per_block*blockIdx.x;out<(blockIdx.x+1)*per_block;out+=v_size){
float tmp_res=inner_product(s_vector,m+out+tile, FETCH_SIZE);
if(threadIdx.x==0)
{
if (tile==0) (*(o+out_offset + (out/v_size)))=tmp_res;
else (*(o+out_offset + (out/v_size)))+=tmp_res;
}
__syncthreads();
}
}
//}
}
void init_device_app(){
// CUDA_SAFE_CALL(cudaDeviceSetLimit(cudaLimitMallocHeapSize,1<<30));
}
#include <sys/time.h>
double _timestamp(){
struct timeval tv;
gettimeofday(&tv,0);
return 1e6*tv.tv_sec+tv.tv_usec;
}
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "timer.h"
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
void* open_map_file(const char* f, int* fd, size_t* size, int type)
{
int open_fd=open(f,type==O_RDONLY?type:type|O_CREAT|O_TRUNC,S_IRUSR|S_IWUSR);
if (open_fd<0){
perror("open failed");
return NULL;
}
if (type!=O_RDONLY) {
assert(*size>0);
if (ftruncate(open_fd,*size)){
perror("ftrunc failed");
return NULL;
}
}
struct stat s;
if (fstat(open_fd,&s)) {
fprintf(stderr,"Problem with fstat the file on CPU: %s \n ",strerror(errno));
}
if (s.st_size==0) {
fprintf(stderr,"file with zero lenght, skipping %s\n",f);
close(open_fd);
return NULL;
}
	void* data=mmap(NULL,s.st_size,type==O_RDONLY?PROT_READ:PROT_READ|PROT_WRITE,MAP_POPULATE|(type==O_RDONLY?MAP_PRIVATE:MAP_SHARED),open_fd,0); // O_RDONLY is 0, so the flag value itself cannot be used as the condition
if (data==MAP_FAILED) {
perror("mmap");
close(open_fd);
return NULL;
}
*fd=open_fd;
*size=s.st_size;
return data;
}
#define CUDA_SAFE_CALL(x) if((x)!=cudaSuccess) { fprintf(stderr,"CUDA ERROR %s: %d %s\n",__FILE__, __LINE__, cudaGetErrorString(cudaGetLastError())); exit(-1); }
#include <cuda.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include "fs_constants.h"
#include "fs_debug.cu.h"
#include "util.cu.h"
#include "timer.h"
//DEBUG
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
void unmap_close_file(int fd, void* ptr,size_t len, const char* what)
{
if(munmap(ptr,len)) { perror("unmap"); fprintf(stderr,"%s\n",what); return;}
close(fd);
}
#include <assert.h>
// size of the output used for data staging
int main( int argc, char** argv)
{
if(argc<4) {
fprintf(stderr," <vector> <matrix> <output>\n\n");
return -1;
}
double total_time=0;
int nblocks=28;//56;
int nthreads=256;
int trials =5;
for(int t=0;t<trials+1;t++){
int fd_m;
size_t size_m;
char* h_matrix=(char*)open_map_file(argv[2], &fd_m, &size_m, O_RDONLY);
assert(h_matrix);
float* h_d_matrix[2];
float* d_matrix[2];
int data_per_chunk=size_m/4;
assert(size_m%data_per_chunk==0);
printf("Data per chunk: %d\n",data_per_chunk);
assert(data_per_chunk%sizeof(float)==0);
CUDA_SAFE_CALL(cudaHostAlloc(&h_d_matrix[0],data_per_chunk, cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaHostAlloc(&h_d_matrix[1],data_per_chunk, cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc(&d_matrix[0],data_per_chunk));
CUDA_SAFE_CALL(cudaMalloc(&d_matrix[1],data_per_chunk));
int fd_v;
size_t size_v;
char* h_vector=(char*)open_map_file(argv[1],&fd_v,&size_v,O_RDONLY);
assert(h_vector);
float* h_d_vector;
float* d_vector;
CUDA_SAFE_CALL(cudaHostAlloc(&h_d_vector,size_v, cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc(&d_vector,size_v));
assert(data_per_chunk/size_v/nblocks>0);
assert((data_per_chunk/size_v)%nblocks==0);
printf("Running with %d blocks, %d threads, %d vals per block\n",nblocks, nthreads,(data_per_chunk/size_v)/nblocks );
int fd_v_out;
size_t size_v_out=size_m/size_v*sizeof(float);
assert(size_v_out);
char* h_v_out=(char*)open_map_file(argv[3], &fd_v_out, &size_v_out, O_RDWR);
assert(h_v_out);
float* h_d_v_out;
float* d_v_out;
CUDA_SAFE_CALL(cudaHostAlloc(&h_d_v_out,size_v_out, cudaHostAllocDefault));
CUDA_SAFE_CALL(cudaMalloc(&d_v_out,size_v_out));
fprintf(stderr,"using: %s for matrix of size %lu, %s for vector of size %lu, %s for output of size %lu, data per chunk %lu\n",
argv[2], size_m,argv[1],size_v,argv[3],size_v_out,data_per_chunk);
cudaStream_t s[2];
CUDA_SAFE_CALL(cudaStreamCreate(&s[0]));
CUDA_SAFE_CALL(cudaStreamCreate(&s[1]));
double time_before=_timestamp();
if (t==0) time_before=0;
int c=0;
memcpy(h_d_vector,h_vector,size_v);
CUDA_SAFE_CALL(cudaMemcpy(d_vector,h_d_vector,size_v,cudaMemcpyHostToDevice));
if (t==0) time_before=0;
for(size_t i=0 ;i<size_m;i+=data_per_chunk)
{
fprintf(stderr,"chunk %lu\n",i);
CUDA_SAFE_CALL(cudaStreamSynchronize(s[c]));
// total_mem_time+=_timestamp()-time_before_mem;;
// time_before_mem=_timestamp();
//
// size_t num_read=pread(fd_m,h_d_matrix[c],data_per_chunk,i);
// assert(num_read==data_per_chunk);
memcpy(h_d_matrix[c],h_matrix+i,data_per_chunk);
CUDA_SAFE_CALL(cudaMemcpyAsync(d_matrix[c],h_d_matrix[c],data_per_chunk,cudaMemcpyHostToDevice,s[c]));
bigmatrix_nofiles_simple<<<nblocks,nthreads,0,s[c]>>>(d_matrix[c],d_vector,d_v_out,i/size_v,
data_per_chunk/(sizeof(float)), size_v/(sizeof(float)));
c=c^0x1;
}
//time_before_mem=_timestamp();
//CUDA_SAFE_CALL(cudaDeviceSynchronize());
CUDA_SAFE_CALL(cudaMemcpy(h_d_v_out,d_v_out,size_v_out,cudaMemcpyDeviceToHost));
memcpy(h_v_out,h_d_v_out,size_v_out);
double time_after=_timestamp();
total_time+=(time_after-time_before);
if(!t) total_time=0;
unmap_close_file(fd_v_out,h_v_out,size_v_out,"out");
unmap_close_file(fd_m,h_matrix,size_m,"matrix");
unmap_close_file(fd_v,h_vector,size_v,"vector");
if (t) fprintf(stderr,"total time %.0f us %.3f GB \n ",total_time/t, t*(size_v+size_m+size_v_out)/total_time/1000);
cudaFreeHost(h_d_v_out);
cudaFreeHost(h_d_vector);
	cudaFreeHost(h_d_matrix[0]);
	cudaFreeHost(h_d_matrix[1]);
cudaFree(d_v_out);
cudaFree(d_vector);
	cudaFree(d_matrix[0]);
	cudaFree(d_matrix[1]);
cudaError_t error = cudaDeviceSynchronize();
//Check for errors and failed asserts in asynchronous kernel launch.
if(error != cudaSuccess )
{
printf("Device failed, CUDA error message is: %s\n\n", cudaGetErrorString(error));
}
// cudaFree(d_output);
cudaDeviceReset();
if(error) break;
}
return 0;
}
|
78e5cc1c09120397448048489dcff7a72ccb3767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
  The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This kernel computes
the triangular matrix product (TRMM) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 64x64x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the STRMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision TRMM kernel
//
// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS TRMM template and launch a TRMM kernel.
hipError_t CutlassStrmmNN(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS TRMM with column-major
// input matrices and 64x64x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision TRMM. Typical values are used as
// default template arguments.
//
// To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
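  // Template arguments, in order: element type and layout of A; side, fill and diagonal mode of
  // the triangular operand; element type and layout of B and of C/D; accumulator type; tensor-op
  // class and target architecture (SM80); threadblock, warp and instruction tile shapes; epilogue
  // (alpha-only linear combination); threadblock swizzle; number of pipeline stages; alignment of
  // A and B; split-K serial flag; and the multiply-add operator.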
using CutlassTrmm = cutlass::gemm::device::Trmm<
double,
ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
double,
ColumnMajor,
double,
ColumnMajor,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
5,
1,
1,
false,
cutlass::arch::OpMultiplyAdd
>;
// Define a CUTLASS TRMM type
CutlassTrmm trmm_operator;
// Construct the CUTLASS TRMM arguments object.
//
// One of CUTLASS's design patterns is to define trmm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Trmm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, M}, // Trmm Problem dimensions in Left-Side Mode
1, // batch_count,
{alpha}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
reinterpret_cast<void const *>(B),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)M*M, // Batch strides
(int64_t)M*N,
(int64_t)M*N,
lda,
ldb,
ldc);
//
// Launch the CUTLASS TRMM kernel.
//
cutlass::Status status = trmm_operator(args);
//
// Return a hipError_t if the CUTLASS TRMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
// Return success, if no errors were encountered.
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
if (fill_mode == cutlass::FillMode::kLower && i < j) return;
else if (fill_mode == cutlass::FillMode::kUpper && i > j) return;
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
hipError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
hipLaunchKernelGGL(InitializeMatrix_kernel, grid, block, 0, 0, matrix, ldm, rows, columns, seed, fill_mode);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
hipError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
hipError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = hipMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to allocate matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = hipMemset(*matrix, 0, sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode);
if (result != hipSuccess) {
std::cerr << "Failed to initialize matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference TRMM computation.
__global__ void ReferenceTrmm_kernel(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
double accumulator = 0;
for (int k = 0; k < M; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode
}
C[i + j * ldc] = alpha * accumulator;
}
}
/// Reference TRMM computation.
hipError_t ReferenceTrmm(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
hipLaunchKernelGGL(ReferenceTrmm_kernel, grid, block, 0, 0, M, N, alpha, A, lda, B, ldb, C, ldc);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS TRMM kernel.
hipError_t TestCutlassTrmm(int M, int N, double alpha) {
hipError_t result;
//
// Define several matrices to be used as operands to TRMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = M;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *B;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);
if (result != hipSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, M, N, 17);
if (result != hipSuccess) {
hipFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
hipFree(C_cutlass);
return result;
}
result = hipMemcpy(C_reference, C_cutlass, sizeof_C, hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Launch CUTLASS TRMM.
//
result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);
if (result != hipSuccess) {
std::cerr << "CUTLASS TRMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Verify.
//
// Launch reference TRMM
result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);
if (result != hipSuccess) {
std::cerr << "Reference TRMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = hipMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy CUTLASS TRMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
result = hipMemcpy(host_reference.data(), C_reference, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy Reference TRMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Free device memory allocations.
//
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return hipErrorUnknown;
}
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_trmm example.
//
// usage:
//
// 00_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain TRMM dimensions and scalar values.
//
// TRMM problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[1] = { 1 };
for (int i = 3; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS TRMM test.
//
hipError_t result = TestCutlassTrmm(
problem[0], // TRMM M dimension
problem[1], // TRMM N dimension
scalars[0] // alpha
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| 78e5cc1c09120397448048489dcff7a72ccb3767.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This is kernel computes
the triangular matrix product (TRMM) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 64x64x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the STRMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision TRMM kernel
//
// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS TRMM template and launch a TRMM kernel.
cudaError_t CutlassStrmmNN(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS TRMM with column-major
// input matrices and 64x64x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision TRMM. Typical values are used as
// default template arguments.
//
// To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassTrmm = cutlass::gemm::device::Trmm<
double,
ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
double,
ColumnMajor,
double,
ColumnMajor,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
5,
1,
1,
false,
cutlass::arch::OpMultiplyAdd
>;
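//
// A note on the template arguments above (a hedged reading of the device-level Trmm interface):
// the three GemmShape<> values are, in order, the threadblock tile (64x64x16), the warp tile
// (32x32x16), and the tensor-core instruction shape (8x8x4, the double-precision MMA shape on
// SM80). The trailing integer and boolean arguments are assumed to select 5 pipeline stages,
// alignment 1 for A and B, and no serial split-K, with a plain multiply-add operator.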
// Define a CUTLASS TRMM type
CutlassTrmm trmm_operator;
// Construct the CUTLASS TRMM arguments object.
//
// One of CUTLASS's design patterns is to define trmm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Trmm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, M}, // Trmm Problem dimensions in Left-Side Mode
1, // batch_count,
{alpha}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
reinterpret_cast<void const *>(B),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)M*M, // Batch strides
(int64_t)M*N,
(int64_t)M*N,
lda,
ldb,
ldc);
//
// Launch the CUTLASS TRMM kernel.
//
cutlass::Status status = trmm_operator(args);
//
// Return a cudaError_t if the CUTLASS TRMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
if (fill_mode == cutlass::FillMode::kLower && i < j) return;
else if (fill_mode == cutlass::FillMode::kUpper && i > j) return;
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed, fill_mode);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference TRMM computation.
__global__ void ReferenceTrmm_kernel(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
double accumulator = 0;
for (int k = 0; k < M; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode
}
C[i + j * ldc] = alpha * accumulator;
}
}
/// Reference TRMM computation.
cudaError_t ReferenceTrmm(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceTrmm_kernel<<< grid, block >>>(M, N, alpha, A, lda, B, ldb, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS TRMM kernel.
cudaError_t TestCutlassTrmm(int M, int N, double alpha) {
cudaError_t result;
//
// Define several matrices to be used as operands to TRMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = M;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *B;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, M, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS TRMM.
//
result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference TRMM
result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_trmm example.
//
// usage:
//
// 00_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain TRMM dimensions and scalar values.
//
// TRMM problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[1] = { 1 };
for (int i = 3; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS TRMM test.
//
cudaError_t result = TestCutlassTrmm(
problem[0], // TRMM M dimension
problem[1], // TRMM N dimension
scalars[0] // alpha
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
e69e76183ee2246546d3f680e902d96dcca2b651.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_match.cuh"
#include <hip/hip_runtime.h>
#define UNROLL_SIZE(l) (l > 0 ? UNROLL: 1)
namespace STMatch {
struct StealingArgs {
int* idle_warps;
int* idle_warps_count;
int* global_mutex;
int* local_mutex;
CallStack* global_callstack;
};
__forceinline__ __device__ void lock(int* mutex) {
while (atomicCAS((int*)mutex, 0, 1) != 0) {
}
}
__forceinline__ __device__ void unlock(int* mutex) {
atomicExch((int*)mutex, 0);
}
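// trans_layer: work-stealing transfer between per-warp call stacks. A hedged summary inferred
// from the code: the thief (_cur_stk) copies the target's partial match and slot contents up to
// level _k, then the remaining iterations at level _k are split so that the target keeps the head
// and the thief takes the tail (with the default ratio = 2, roughly half of what is left).
// Returns false if the target has nothing to give at that level.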
__device__ bool trans_layer(CallStack& _target_stk, CallStack& _cur_stk, Pattern* _pat, int _k, int ratio = 2) {
if (_target_stk.level <= _k)
return false;
int num_left_task = _target_stk.slot_size[_pat->rowptr[_k]][_target_stk.uiter[_k]] -
(_target_stk.iter[_k] + _target_stk.uiter[_k + 1] + 1);
if (num_left_task <= 0)
return false;
int stealed_start_idx_in_target = _target_stk.iter[_k] + _target_stk.uiter[_k + 1] + 1 + num_left_task / ratio;
_cur_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1]] = _target_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1]];
_cur_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1] + JOB_CHUNK_SIZE] = _target_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1] + JOB_CHUNK_SIZE];
for (int i = 1; i < _k; i++) {
_cur_stk.slot_storage[_pat->rowptr[i]][_target_stk.uiter[i]][_target_stk.iter[i] + _target_stk.uiter[i + 1]] = _target_stk.slot_storage[_pat->rowptr[i]][_target_stk.uiter[i]][_target_stk.iter[i] + _target_stk.uiter[i + 1]];
}
for (int r = _pat->rowptr[_k]; r < _pat->rowptr[_k + 1]; r++) {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
int loop_end = _k == 0 ? JOB_CHUNK_SIZE * 2 : _target_stk.slot_size[r][u];
for (int t = 0; t < loop_end; t++) {
_cur_stk.slot_storage[r][u][t] = _target_stk.slot_storage[r][u][t];
}
}
}
for (int l = 0; l < _k; l++) {
_cur_stk.iter[l] = _target_stk.iter[l];
_cur_stk.uiter[l] = _target_stk.uiter[l];
for (int s = _pat->rowptr[l]; s < _pat->rowptr[l + 1]; s++) {
if (s > _pat->rowptr[l]) {
for (int u = 0; u < UNROLL; u++) {
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
}
}
else {
for (int u = 0; u < UNROLL_SIZE(l); u++) {
if (u == _cur_stk.uiter[l])
_cur_stk.slot_size[_pat->rowptr[l]][u] = _target_stk.iter[l] + 1;
else
_cur_stk.slot_size[_pat->rowptr[l]][u] = 0;
}
}
}
}
// Invalidate the target's level-(_k+1) slots that correspond to iterations handed to the thief.
for (int i = stealed_start_idx_in_target - _target_stk.iter[_k]; i < UNROLL_SIZE(_k + 1); i++) {
_target_stk.slot_size[_pat->rowptr[_k + 1]][i] = 0;
}
for (int s = _pat->rowptr[_k]; s < _pat->rowptr[_k + 1]; s++) {
if (s == _pat->rowptr[_k]) {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
if (u == _target_stk.uiter[_k])
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
else
_cur_stk.slot_size[s][u] = 0;
}
}
else {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
}
}
}
_cur_stk.uiter[_k] = _target_stk.uiter[_k];
_cur_stk.iter[_k] = stealed_start_idx_in_target;
_target_stk.slot_size[_pat->rowptr[_k]][_target_stk.uiter[_k]] = stealed_start_idx_in_target;
// Reset the thief's stack below the stolen level so its traversal restarts there.
for (int l = _k + 1; l < _pat->nnodes - 1; l++) {
_cur_stk.iter[l] = 0;
_cur_stk.uiter[l] = 0;
for (int s = _pat->rowptr[l]; s < _pat->rowptr[l + 1]; s++) {
for (int u = 0; u < UNROLL_SIZE(l); u++) {
_cur_stk.slot_size[s][u] = 0;
}
}
}
_cur_stk.iter[_pat->nnodes - 1] = 0;
_cur_stk.uiter[_pat->nnodes - 1] = 0;
for (int u = 0; u < UNROLL_SIZE(_pat->nnodes - 1); u++) {
_cur_stk.slot_size[_pat->rowptr[_pat->nnodes - 1]][u] = 0;
}
_cur_stk.level = _k + 1;
return true;
}
__device__ bool trans_skt(CallStack* _all_stk, CallStack* _cur_stk, Pattern* pat, StealingArgs* _stealing_args) {
int max_left_task = 0;
int stk_idx = -1;
int at_level = -1;
for (int level = 0; level < STOP_LEVEL; level++) {
for (int i = 0; i < NWARPS_PER_BLOCK; i++) {
if (i == threadIdx.x / WARP_SIZE)
continue;
lock(&(_stealing_args->local_mutex[i]));
int left_task = _all_stk[i].slot_size[pat->rowptr[level]][_all_stk[i].uiter[level]] -
(_all_stk[i].iter[level] + _all_stk[i].uiter[level + 1] + 1);
if (left_task > max_left_task) {
max_left_task = left_task;
stk_idx = i;
at_level = level;
}
unlock(&(_stealing_args->local_mutex[i]));
}
if (stk_idx != -1)
break;
}
if (stk_idx != -1) {
bool res;
lock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
lock(&(_stealing_args->local_mutex[stk_idx]));
res = trans_layer(_all_stk[stk_idx], *_cur_stk, pat, at_level);
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
unlock(&(_stealing_args->local_mutex[stk_idx]));
return res;
}
return false;
}
__forceinline__ __device__ graph_node_t path(CallStack* stk, Pattern* pat, int level, int k) {
if (level > 0)
return stk->slot_storage[pat->rowptr[level]][stk->uiter[level]][stk->iter[level] + k];
else {
return stk->slot_storage[0][stk->uiter[0]][stk->iter[0] + k + (level + 1) * JOB_CHUNK_SIZE];
}
}
__forceinline__ __device__ graph_node_t* path_address(CallStack* stk, Pattern* pat, int level, int k) {
if (level > 0)
return &(stk->slot_storage[pat->rowptr[level]][stk->uiter[level]][stk->iter[level] + k]);
else {
return &(stk->slot_storage[0][stk->uiter[0]][stk->iter[0] + k + (level + 1) * JOB_CHUNK_SIZE]);
}
}
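// Note on the level <= 0 branch above: the first two vertices of every job are packed into slot
// row 0 at offsets 0 and JOB_CHUNK_SIZE (see the level-0 branch of extend()), so level -1 reads
// the first matched vertex and level 0 reads the second.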
typedef struct {
graph_node_t* set1[UNROLL], * set2[UNROLL], * res[UNROLL];
graph_node_t set1_size[UNROLL], set2_size[UNROLL], * res_size[UNROLL];
graph_node_t ub[UNROLL];
bitarray32 label;
Graph* g;
int num_sets;
bool cached;
int level;
Pattern* pat;
} Arg_t;
template<typename DATA_T, typename SIZE_T>
__forceinline__ __device__
bool bsearch_exist(DATA_T* set2, SIZE_T set2_size, DATA_T target) {
if (set2_size <= 0) return false;
int mid;
int low = 0;
int high = set2_size - 1;
while (low <= high) {
mid = (low + high) / 2;
if (target == set2[mid]) {
return true;
}
else if (target > set2[mid]) {
low = mid + 1;
}
else {
high = mid - 1;
}
}
return false;
}
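// Despite its name, the routine below behaves like a lower-bound search: it returns the index of
// the first element that is >= target, i.e. the number of elements strictly smaller than target.
// Example: upper_bound({2, 5, 8, 11}, 4, 8) == 2. compute_set uses it to truncate a sorted
// candidate list at an exclusive upper bound.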
template<typename DATA_T, typename SIZE_T>
__forceinline__ __device__
SIZE_T upper_bound(DATA_T* set2, SIZE_T set2_size, DATA_T target) {
int i, step;
int low = 0;
while (set2_size > 0) {
i = low;
step = set2_size / 2;
i += step;
if (target > set2[i]) {
low = ++i; set2_size -= step + 1;
}
else {
set2_size = step;
}
}
return low;
}
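// The routine below is a warp-wide exclusive scan (Blelloch up-sweep/down-sweep over WARP_SIZE
// slots); slots past input_size must be zeroed by the caller, as compute_set does. On return,
// _input[i] holds the sum of elements 0..i-1 and every slot from _input[input_size] through
// _input[WARP_SIZE] holds the grand total. Example with input_size = 3: {3, 1, 2} becomes
// {0, 3, 4} and _input[3] == 6.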
__forceinline__ __device__
void prefix_sum(int* _input, int input_size) {
int thid = threadIdx.x % WARP_SIZE;
int offset = 1;
int last_element = _input[input_size - 1];
// build sum in place up the tree
for (int d = (WARP_SIZE >> 1); d > 0; d >>= 1) {
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
_input[bi] += _input[ai];
}
offset <<= 1;
}
if (thid == 0) { _input[WARP_SIZE - 1] = 0; } // clear the last element
// traverse down tree & build scan
for (int d = 1; d < WARP_SIZE; d <<= 1) {
offset >>= 1;
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = _input[ai];
_input[ai] = _input[bi];
_input[bi] += t;
}
}
__syncwarp();
if (thid >= input_size - 1)
_input[thid + 1] = _input[input_size - 1] + last_element;
}
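// compute_set<DIFF>: for each of the (up to UNROLL) candidate lists, truncate set1 at the
// exclusive bound ub, then keep the elements whose labels match and that are absent from set2
// when DIFF is true (set difference) or present in set2 when DIFF is false (set intersection).
// The whole warp processes all lists cooperatively, using the prefix sum of their sizes to map
// elements onto lanes. This summary is inferred from the code below.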
template<bool DIFF>
__device__ void compute_set(Arg_t* arg) {
__shared__ graph_node_t size_psum[NWARPS_PER_BLOCK][WARP_SIZE + 1];
__shared__ int end_pos[NWARPS_PER_BLOCK][UNROLL];
int wid = threadIdx.x / WARP_SIZE;
int tid = threadIdx.x % WARP_SIZE;
if (tid < arg->num_sets) {
arg->set1_size[tid] = upper_bound(arg->set1[tid], arg->set1_size[tid], arg->ub[tid]);
size_psum[wid][tid] = arg->set1_size[tid];
end_pos[wid][tid] = 0;
}
else {
size_psum[wid][tid] = 0;
}
__syncwarp();
prefix_sum(&size_psum[wid][0], arg->num_sets);
__syncwarp();
bool still_loop = true;
int slot_idx = 0;
int offset = 0;
int predicate;
for (int idx = tid; (idx < ((size_psum[wid][WARP_SIZE] > 0) ? (((size_psum[wid][WARP_SIZE] - 1) / WARP_SIZE + 1) * WARP_SIZE) : 0) && still_loop); idx += WARP_SIZE) {
predicate = 0;
if (idx < size_psum[wid][WARP_SIZE]) {
while (idx >= size_psum[wid][slot_idx + 1]) {
slot_idx++;
}
offset = idx - size_psum[wid][slot_idx];
bitarray32 lb = arg->g->vertex_label[arg->set1[slot_idx][offset]];
predicate = ((lb & arg->label) == lb) && (DIFF ^ bsearch_exist(arg->set2[slot_idx], arg->set2_size[slot_idx], arg->set1[slot_idx][offset]));
}
else {
slot_idx = arg->num_sets;
still_loop = false;
}
still_loop = __shfl_sync(0xFFFFFFFF, still_loop, 31);
predicate = __ballot_sync(0xFFFFFFFF, predicate);
bool cond = (arg->level < arg->pat->nnodes - 2 && predicate & (1 << tid));
graph_node_t res_tmp;
if (cond) {
res_tmp = arg->set1[slot_idx][offset];
}
int prev_idx = ((idx / WARP_SIZE == size_psum[wid][slot_idx] / WARP_SIZE) ? size_psum[wid][slot_idx] % WARP_SIZE : 0);
if (cond) {
arg->res[slot_idx][end_pos[wid][slot_idx] + __popc(predicate & ((1 << tid) - (1 << prev_idx)))] = res_tmp;
}
if (slot_idx < __shfl_down_sync(0xFFFFFFFF, slot_idx, 1)) {
end_pos[wid][slot_idx] += __popc(predicate & ((1 << (tid + 1)) - (1 << prev_idx)));
}
else if (tid == WARP_SIZE - 1 && slot_idx < arg->num_sets) {
end_pos[wid][slot_idx] += __popc(predicate & (0xFFFFFFFF - (1 << prev_idx) + 1));
}
}
__syncwarp();
if (tid < arg->num_sets) {
*(arg->res_size[tid]) = end_pos[wid][tid];
}
__syncwarp();
}
__forceinline__ __device__ void get_job(JobQueue* q, graph_node_t& cur_pos, graph_node_t& njobs) {
lock(&(q->mutex));
cur_pos = q->cur;
q->cur += JOB_CHUNK_SIZE;
if (q->cur > q->length) q->cur = q->length;
njobs = q->cur - cur_pos;
unlock(&(q->mutex));
}
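// extend: builds the candidate sets for the given pattern level. At level 0 the warp leader pulls
// a chunk of jobs (pre-matched vertex pairs) from the shared JobQueue into slot row 0; at deeper
// levels each slot of the pattern row is materialized by intersecting or differencing adjacency
// lists, bounded by the partial-order constraints encoded in pat->partial. High-level summary
// inferred from the code.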
__device__ void extend(Graph* g, Pattern* pat, CallStack* stk, JobQueue* q, pattern_node_t level) {
__shared__ Arg_t arg[NWARPS_PER_BLOCK];
int wid = threadIdx.x / WARP_SIZE;
if (level == 0) {
graph_node_t cur_job, njobs;
// TODO: change to warp
for (int k = 0; k < UNROLL_SIZE(level); k++) {
if (threadIdx.x % WARP_SIZE == 0) {
get_job(q, cur_job, njobs);
for (size_t i = 0; i < njobs; i++) {
for (int j = 0; j < 2; j++) {
stk->slot_storage[0][k][i + JOB_CHUNK_SIZE * j] = (q->q[cur_job + i].nodes)[j];
}
}
stk->slot_size[0][k] = njobs;
}
__syncwarp();
}
}
else {
arg[wid].g = g;
arg[wid].num_sets = UNROLL_SIZE(level);
int remaining = stk->slot_size[pat->rowptr[level - 1]][stk->uiter[level - 1]] - stk->iter[level - 1];
if (remaining >= 0 && UNROLL_SIZE(level) > remaining) {
arg[wid].num_sets = remaining;
}
for (int i = pat->rowptr[level]; i < pat->rowptr[level + 1]; i++) {
// compute ub based on pattern->partial
if (!LABELED) {
graph_node_t ub = ((i == pat->rowptr[level]) ? INT_MAX : -1);
if (pat->partial[i] != 0) {
// compute ub with nodes after start_level until previous level
for (pattern_node_t k = 1; k < level - 1; k++) {
if ((pat->partial[i] & (1 << (k + 1))) && ((i == pat->rowptr[level]) ^ (ub < path(stk, pat, k, stk->uiter[k + 1])))) ub = path(stk, pat, k, stk->uiter[k + 1]);
}
// compute ub with nodes in the previous level
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = ub;
int prev_level = (level > 1 ? 2 : 1);
int prev_iter = (level > 1 ? stk->uiter[1] : k);
// compute ub with the first few nodes before start_level
for (pattern_node_t j = 0; j < prev_level; j++) {
if ((pat->partial[i] & (1 << j)) && ((i == pat->rowptr[level]) ^ (arg[wid].ub[k] < path(stk, pat, j - 1, prev_iter)))) arg[wid].ub[k] = path(stk, pat, j - 1, prev_iter);
}
if ((pat->partial[i] & (1 << level)) && ((i == pat->rowptr[level]) ^ (arg[wid].ub[k] < path(stk, pat, level - 1, k)))) arg[wid].ub[k] = path(stk, pat, level - 1, k);
if (arg[wid].ub[k] == -1) arg[wid].ub[k] = INT_MAX;
}
}
else {
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = INT_MAX;
}
}
}
else {
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = INT_MAX;
}
}
arg[wid].label = pat->slot_labels[i];
if (pat->set_ops[i] & 0x20) {
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].set2[k] = NULL;
arg[wid].set2_size[k] = 0;
if (!EDGE_INDUCED) {
graph_node_t t = path(stk, pat, level - 2, ((level > 1) ? stk->uiter[level - 1] : k));
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
graph_node_t t = path(stk, pat, level - 1, k);
arg[wid].set1[k] = &g->colidx[g->rowptr[t]];
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set1_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
// arg[wid].cached = (level > 1);
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
if(EDGE_INDUCED && !LABELED){
for (int j = level-1; j >= -1; j--)
{
int unrollIdx = threadIdx.x % WARP_SIZE;
if(unrollIdx < arg[wid].num_sets)
{
if(level==1){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, unrollIdx);
}
else{
if(j==level-1){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, unrollIdx);
arg[wid].set2_size[unrollIdx] = 1;
}
else{
if(j>0){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, stk->uiter[j + 1]);
}
else{
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, stk->uiter[1]);
}
}
}
arg[wid].set1[unrollIdx] = &(stk->slot_storage[i][unrollIdx][0]);
arg[wid].res[unrollIdx] = &(stk->slot_storage[i][unrollIdx][0]);
arg[wid].set1_size[unrollIdx] = stk->slot_size[i][unrollIdx];
arg[wid].res_size[unrollIdx] = &(stk->slot_size[i][unrollIdx]);
arg[wid].set2_size[unrollIdx] = 1;
arg[wid].level = level;
arg[wid].pat = pat;
}
__syncwarp();
compute_set<true>(&arg[wid]);
}
}
if (!EDGE_INDUCED) {
for (pattern_node_t j = level - 3; j >= -1; j--) {
graph_node_t t = path(stk, pat, j, stk->uiter[(j > 0 ? j + 1 : 1)]);
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].set1[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set1_size[k] = stk->slot_size[i][k];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = true;
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
}
}
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
else {
pattern_node_t slot_idx = (pat->set_ops[i] & 0x1F);
if (pat->set_ops[i] & 0x40) { //INTE
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
graph_node_t t = path(stk, pat, level - 1, k);
graph_node_t* neighbor = &g->colidx[g->rowptr[t]];
graph_node_t neighbor_size = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
if (level > 1) {
arg[wid].set2[k] = &(stk->slot_storage[slot_idx][stk->uiter[level - 1]][0]);
arg[wid].set2_size[k] = stk->slot_size[slot_idx][stk->uiter[level - 1]];
}
else {
graph_node_t t = path(stk, pat, -1, k);
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
arg[wid].set1[k] = neighbor;
arg[wid].set1_size[k] = neighbor_size;
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = (level > 1);
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<false>(&arg[wid]);
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
else { //DIFF
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
graph_node_t* neighbor = NULL;
graph_node_t neighbor_size = 0;
if(EDGE_INDUCED && !LABELED){
neighbor = path_address(stk, pat, level - 1, k);
neighbor_size = 1;
}
if (!EDGE_INDUCED) {
graph_node_t t = path(stk, pat, level - 1, k);
neighbor = &g->colidx[g->rowptr[t]];
neighbor_size = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
if (level > 1) {
arg[wid].set1[k] = &(stk->slot_storage[slot_idx][stk->uiter[level - 1]][0]);
arg[wid].set1_size[k] = stk->slot_size[slot_idx][stk->uiter[level - 1]];
}
else {
graph_node_t t = path(stk, pat, -1, k);
arg[wid].set1[k] = &g->colidx[g->rowptr[t]];
arg[wid].set1_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
arg[wid].set2[k] = neighbor;
arg[wid].set2_size[k] = neighbor_size;
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = false;
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
}
}
}
stk->iter[level] = 0;
stk->uiter[level] = 0;
}
__forceinline__ __device__ void respond_across_block(int level, CallStack* stk, Pattern* pat, StealingArgs* _stealing_args) {
if (level > 0 && level <= DETECT_LEVEL) {
if (threadIdx.x % WARP_SIZE == 0) {
int at_level = -1;
int left_task = 0;
for (int l = 0; l < level; l++) {
left_task = stk->slot_size[pat->rowptr[l]][stk->uiter[l]] - stk->iter[l] - stk->uiter[l + 1] - 1;
if (left_task > 0) {
at_level = l;
break;
}
}
if (at_level != -1) {
for (int b = 0; b < GRID_DIM; b++) {
if (b == blockIdx.x) continue;
if (atomicCAS(&(_stealing_args->global_mutex[b]), 0, 1) == 0) {
if (atomicAdd(&_stealing_args->idle_warps[b], 0) == 0xFFFFFFFF) {
__threadfence();
trans_layer(*stk, _stealing_args->global_callstack[b * NWARPS_PER_BLOCK], pat, at_level, INT_MAX);
__threadfence();
atomicSub(_stealing_args->idle_warps_count, NWARPS_PER_BLOCK);
atomicExch(&_stealing_args->idle_warps[b], 0);
atomicExch(&(_stealing_args->global_mutex[b]), 0);
break;
}
atomicExch(&(_stealing_args->global_mutex[b]), 0);
}
}
}
}
__syncwarp();
}
}
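// match: iterative DFS over the pattern driven by the explicit per-warp CallStack. Levels below
// pat->nnodes - 2 expand candidate sets (and, if enabled, answer cross-block steal requests);
// the last expanded level only accumulates the match count. The per-warp local_mutex serializes
// stack access with in-block work stealing. Summary inferred from the code below.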
__device__ void match(Graph* g, Pattern* pat,
CallStack* stk, JobQueue* q, size_t* count, StealingArgs* _stealing_args) {
pattern_node_t& level = stk->level;
while (true) {
if (threadIdx.x % WARP_SIZE == 0) {
lock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
}
__syncwarp();
if (level < pat->nnodes - 2) {
if (STEAL_ACROSS_BLOCK) {
respond_across_block(level, stk, pat, _stealing_args);
}
if (stk->uiter[level] == 0 && stk->slot_size[pat->rowptr[level]][0] == 0) {
extend(g, pat, stk, q, level);
if (level == 0 && stk->slot_size[0][0] == 0) {
if (threadIdx.x % WARP_SIZE == 0)
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
__syncwarp();
break;
}
}
if (stk->uiter[level] < UNROLL_SIZE(level)) {
if (stk->iter[level] < stk->slot_size[pat->rowptr[level]][stk->uiter[level]]) {
if (threadIdx.x % WARP_SIZE == 0)
level++;
__syncwarp();
}
else {
stk->slot_size[pat->rowptr[level]][stk->uiter[level]] = 0;
stk->iter[level] = 0;
if (threadIdx.x % WARP_SIZE == 0)
stk->uiter[level]++;
__syncwarp();
}
}
else {
stk->uiter[level] = 0;
if (level > 0) {
if (threadIdx.x % WARP_SIZE == 0)
level--;
if (threadIdx.x % WARP_SIZE == 0)
stk->iter[level] += UNROLL_SIZE(level + 1);
__syncwarp();
}
}
}
else if (level == pat->nnodes - 2) {
extend(g, pat, stk, q, level);
for (int j = 0; j < UNROLL_SIZE(level); j++) {
if (threadIdx.x % WARP_SIZE == 0) {
*count += stk->slot_size[pat->rowptr[level]][j];
}
__syncwarp();
stk->slot_size[pat->rowptr[level]][j] = 0;
}
stk->uiter[level] = 0;
if (threadIdx.x % WARP_SIZE == 0)
level--;
if (threadIdx.x % WARP_SIZE == 0)
stk->iter[level] += UNROLL_SIZE(level + 1);
__syncwarp();
}
//__syncwarp();
if (threadIdx.x % WARP_SIZE == 0)
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
__syncwarp();
}
}
__global__ void _parallel_match(Graph* dev_graph, Pattern* dev_pattern,
CallStack* dev_callstack, JobQueue* job_queue, size_t* res,
int* idle_warps, int* idle_warps_count, int* global_mutex) {
__shared__ Graph graph;
__shared__ Pattern pat;
__shared__ CallStack stk[NWARPS_PER_BLOCK];
__shared__ size_t count[NWARPS_PER_BLOCK];
__shared__ bool stealed[NWARPS_PER_BLOCK];
__shared__ int mutex_this_block[NWARPS_PER_BLOCK];
__shared__ StealingArgs stealing_args;
stealing_args.idle_warps = idle_warps;
stealing_args.idle_warps_count = idle_warps_count;
stealing_args.global_mutex = global_mutex;
stealing_args.local_mutex = mutex_this_block;
stealing_args.global_callstack = dev_callstack;
int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
int global_wid = global_tid / WARP_SIZE;
int local_wid = threadIdx.x / WARP_SIZE;
if (threadIdx.x == 0) {
graph = *dev_graph;
pat = *dev_pattern;
}
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
stk[local_wid] = dev_callstack[global_wid];
}
__syncwarp();
auto start = clock64();
while (true) {
match(&graph, &pat, &stk[local_wid], job_queue, &count[local_wid], &stealing_args);
__syncwarp();
stealed[local_wid] = false;
if (STEAL_IN_BLOCK) {
if (threadIdx.x % WARP_SIZE == 0) {
stealed[local_wid] = trans_skt(stk, &stk[local_wid], &pat, &stealing_args);
}
__syncwarp();
}
if (STEAL_ACROSS_BLOCK) {
if (!stealed[local_wid]) {
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(stealing_args.idle_warps_count, 1);
lock(&(stealing_args.global_mutex[blockIdx.x]));
atomicOr(&stealing_args.idle_warps[blockIdx.x], (1 << local_wid));
unlock(&(stealing_args.global_mutex[blockIdx.x]));
while ((atomicAdd(stealing_args.idle_warps_count, 0) < NWARPS_TOTAL) && (atomicAdd(&stealing_args.idle_warps[blockIdx.x], 0) & (1 << local_wid)));
if (atomicAdd(stealing_args.idle_warps_count, 0) < NWARPS_TOTAL) {
__threadfence();
if (local_wid == 0) {
stk[local_wid] = (stealing_args.global_callstack[blockIdx.x * NWARPS_PER_BLOCK]);
}
stealed[local_wid] = true;
}
else {
stealed[local_wid] = false;
}
}
__syncthreads();
}
}
if (!stealed[local_wid]) {
break;
}
}
auto stop = clock64();
if (threadIdx.x % WARP_SIZE == 0) {
res[global_wid] = count[local_wid];
// printf("%d\t%ld\t%d\t%d\n", blockIdx.x, stop - start, stealed[local_wid], local_wid);
//printf("%ld\n", stop - start);
}
// if(threadIdx.x % WARP_SIZE == 0)
// printf("%d\t%d\t%d\n", blockIdx.x, local_wid, mutex_this_block[local_wid]);
}
}
| e69e76183ee2246546d3f680e902d96dcca2b651.cu | #include "gpu_match.cuh"
#include <cuda.h>
#define UNROLL_SIZE(l) (l > 0 ? UNROLL: 1)
namespace STMatch {
struct StealingArgs {
int* idle_warps;
int* idle_warps_count;
int* global_mutex;
int* local_mutex;
CallStack* global_callstack;
};
__forceinline__ __device__ void lock(int* mutex) {
while (atomicCAS((int*)mutex, 0, 1) != 0) {
}
}
__forceinline__ __device__ void unlock(int* mutex) {
atomicExch((int*)mutex, 0);
}
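// trans_layer: work-stealing transfer between per-warp call stacks. A hedged summary inferred
// from the code: the thief (_cur_stk) copies the target's partial match and slot contents up to
// level _k, then the remaining iterations at level _k are split so that the target keeps the head
// and the thief takes the tail (with the default ratio = 2, roughly half of what is left).
// Returns false if the target has nothing to give at that level.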
__device__ bool trans_layer(CallStack& _target_stk, CallStack& _cur_stk, Pattern* _pat, int _k, int ratio = 2) {
if (_target_stk.level <= _k)
return false;
int num_left_task = _target_stk.slot_size[_pat->rowptr[_k]][_target_stk.uiter[_k]] -
(_target_stk.iter[_k] + _target_stk.uiter[_k + 1] + 1);
if (num_left_task <= 0)
return false;
int stealed_start_idx_in_target = _target_stk.iter[_k] + _target_stk.uiter[_k + 1] + 1 + num_left_task / ratio;
_cur_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1]] = _target_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1]];
_cur_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1] + JOB_CHUNK_SIZE] = _target_stk.slot_storage[_pat->rowptr[0]][_target_stk.uiter[0]][_target_stk.iter[0] + _target_stk.uiter[1] + JOB_CHUNK_SIZE];
for (int i = 1; i < _k; i++) {
_cur_stk.slot_storage[_pat->rowptr[i]][_target_stk.uiter[i]][_target_stk.iter[i] + _target_stk.uiter[i + 1]] = _target_stk.slot_storage[_pat->rowptr[i]][_target_stk.uiter[i]][_target_stk.iter[i] + _target_stk.uiter[i + 1]];
}
for (int r = _pat->rowptr[_k]; r < _pat->rowptr[_k + 1]; r++) {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
int loop_end = _k == 0 ? JOB_CHUNK_SIZE * 2 : _target_stk.slot_size[r][u];
for (int t = 0; t < loop_end; t++) {
_cur_stk.slot_storage[r][u][t] = _target_stk.slot_storage[r][u][t];
}
}
}
for (int l = 0; l < _k; l++) {
_cur_stk.iter[l] = _target_stk.iter[l];
_cur_stk.uiter[l] = _target_stk.uiter[l];
for (int s = _pat->rowptr[l]; s < _pat->rowptr[l + 1]; s++) {
if (s > _pat->rowptr[l]) {
for (int u = 0; u < UNROLL; u++) {
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
}
}
else {
for (int u = 0; u < UNROLL_SIZE(l); u++) {
if (u == _cur_stk.uiter[l])
_cur_stk.slot_size[_pat->rowptr[l]][u] = _target_stk.iter[l] + 1;
else
_cur_stk.slot_size[_pat->rowptr[l]][u] = 0;
}
}
}
}
// Invalidate the target's level-(_k+1) slots that correspond to iterations handed to the thief.
for (int i = stealed_start_idx_in_target - _target_stk.iter[_k]; i < UNROLL_SIZE(_k + 1); i++) {
_target_stk.slot_size[_pat->rowptr[_k + 1]][i] = 0;
}
for (int s = _pat->rowptr[_k]; s < _pat->rowptr[_k + 1]; s++) {
if (s == _pat->rowptr[_k]) {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
if (u == _target_stk.uiter[_k])
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
else
_cur_stk.slot_size[s][u] = 0;
}
}
else {
for (int u = 0; u < UNROLL_SIZE(_k); u++) {
_cur_stk.slot_size[s][u] = _target_stk.slot_size[s][u];
}
}
}
_cur_stk.uiter[_k] = _target_stk.uiter[_k];
_cur_stk.iter[_k] = stealed_start_idx_in_target;
_target_stk.slot_size[_pat->rowptr[_k]][_target_stk.uiter[_k]] = stealed_start_idx_in_target;
// Reset the thief's stack below the stolen level so its traversal restarts there.
for (int l = _k + 1; l < _pat->nnodes - 1; l++) {
_cur_stk.iter[l] = 0;
_cur_stk.uiter[l] = 0;
for (int s = _pat->rowptr[l]; s < _pat->rowptr[l + 1]; s++) {
for (int u = 0; u < UNROLL_SIZE(l); u++) {
_cur_stk.slot_size[s][u] = 0;
}
}
}
_cur_stk.iter[_pat->nnodes - 1] = 0;
_cur_stk.uiter[_pat->nnodes - 1] = 0;
for (int u = 0; u < UNROLL_SIZE(_pat->nnodes - 1); u++) {
_cur_stk.slot_size[_pat->rowptr[_pat->nnodes - 1]][u] = 0;
}
_cur_stk.level = _k + 1;
return true;
}
__device__ bool trans_skt(CallStack* _all_stk, CallStack* _cur_stk, Pattern* pat, StealingArgs* _stealing_args) {
int max_left_task = 0;
int stk_idx = -1;
int at_level = -1;
for (int level = 0; level < STOP_LEVEL; level++) {
for (int i = 0; i < NWARPS_PER_BLOCK; i++) {
if (i == threadIdx.x / WARP_SIZE)
continue;
lock(&(_stealing_args->local_mutex[i]));
int left_task = _all_stk[i].slot_size[pat->rowptr[level]][_all_stk[i].uiter[level]] -
(_all_stk[i].iter[level] + _all_stk[i].uiter[level + 1] + 1);
if (left_task > max_left_task) {
max_left_task = left_task;
stk_idx = i;
at_level = level;
}
unlock(&(_stealing_args->local_mutex[i]));
}
if (stk_idx != -1)
break;
}
if (stk_idx != -1) {
bool res;
lock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
lock(&(_stealing_args->local_mutex[stk_idx]));
res = trans_layer(_all_stk[stk_idx], *_cur_stk, pat, at_level);
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
unlock(&(_stealing_args->local_mutex[stk_idx]));
return res;
}
return false;
}
__forceinline__ __device__ graph_node_t path(CallStack* stk, Pattern* pat, int level, int k) {
if (level > 0)
return stk->slot_storage[pat->rowptr[level]][stk->uiter[level]][stk->iter[level] + k];
else {
return stk->slot_storage[0][stk->uiter[0]][stk->iter[0] + k + (level + 1) * JOB_CHUNK_SIZE];
}
}
__forceinline__ __device__ graph_node_t* path_address(CallStack* stk, Pattern* pat, int level, int k) {
if (level > 0)
return &(stk->slot_storage[pat->rowptr[level]][stk->uiter[level]][stk->iter[level] + k]);
else {
return &(stk->slot_storage[0][stk->uiter[0]][stk->iter[0] + k + (level + 1) * JOB_CHUNK_SIZE]);
}
}
typedef struct {
graph_node_t* set1[UNROLL], * set2[UNROLL], * res[UNROLL];
graph_node_t set1_size[UNROLL], set2_size[UNROLL], * res_size[UNROLL];
graph_node_t ub[UNROLL];
bitarray32 label;
Graph* g;
int num_sets;
bool cached;
int level;
Pattern* pat;
} Arg_t;
template<typename DATA_T, typename SIZE_T>
__forceinline__ __device__
bool bsearch_exist(DATA_T* set2, SIZE_T set2_size, DATA_T target) {
if (set2_size <= 0) return false;
int mid;
int low = 0;
int high = set2_size - 1;
while (low <= high) {
mid = (low + high) / 2;
if (target == set2[mid]) {
return true;
}
else if (target > set2[mid]) {
low = mid + 1;
}
else {
high = mid - 1;
}
}
return false;
}
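// Despite its name, the routine below behaves like a lower-bound search: it returns the index of
// the first element that is >= target, i.e. the number of elements strictly smaller than target.
// Example: upper_bound({2, 5, 8, 11}, 4, 8) == 2. compute_set uses it to truncate a sorted
// candidate list at an exclusive upper bound.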
template<typename DATA_T, typename SIZE_T>
__forceinline__ __device__
SIZE_T upper_bound(DATA_T* set2, SIZE_T set2_size, DATA_T target) {
int i, step;
int low = 0;
while (set2_size > 0) {
i = low;
step = set2_size / 2;
i += step;
if (target > set2[i]) {
low = ++i; set2_size -= step + 1;
}
else {
set2_size = step;
}
}
return low;
}
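// The routine below is a warp-wide exclusive scan (Blelloch up-sweep/down-sweep over WARP_SIZE
// slots); slots past input_size must be zeroed by the caller, as compute_set does. On return,
// _input[i] holds the sum of elements 0..i-1 and every slot from _input[input_size] through
// _input[WARP_SIZE] holds the grand total. Example with input_size = 3: {3, 1, 2} becomes
// {0, 3, 4} and _input[3] == 6.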
__forceinline__ __device__
void prefix_sum(int* _input, int input_size) {
int thid = threadIdx.x % WARP_SIZE;
int offset = 1;
int last_element = _input[input_size - 1];
// build sum in place up the tree
for (int d = (WARP_SIZE >> 1); d > 0; d >>= 1) {
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
_input[bi] += _input[ai];
}
offset <<= 1;
}
if (thid == 0) { _input[WARP_SIZE - 1] = 0; } // clear the last element
// traverse down tree & build scan
for (int d = 1; d < WARP_SIZE; d <<= 1) {
offset >>= 1;
if (thid < d) {
int ai = offset * (2 * thid + 1) - 1;
int bi = offset * (2 * thid + 2) - 1;
int t = _input[ai];
_input[ai] = _input[bi];
_input[bi] += t;
}
}
__syncwarp();
if (thid >= input_size - 1)
_input[thid + 1] = _input[input_size - 1] + last_element;
}
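// compute_set<DIFF>: for each of the (up to UNROLL) candidate lists, truncate set1 at the
// exclusive bound ub, then keep the elements whose labels match and that are absent from set2
// when DIFF is true (set difference) or present in set2 when DIFF is false (set intersection).
// The whole warp processes all lists cooperatively, using the prefix sum of their sizes to map
// elements onto lanes. This summary is inferred from the code below.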
template<bool DIFF>
__device__ void compute_set(Arg_t* arg) {
__shared__ graph_node_t size_psum[NWARPS_PER_BLOCK][WARP_SIZE + 1];
__shared__ int end_pos[NWARPS_PER_BLOCK][UNROLL];
int wid = threadIdx.x / WARP_SIZE;
int tid = threadIdx.x % WARP_SIZE;
if (tid < arg->num_sets) {
arg->set1_size[tid] = upper_bound(arg->set1[tid], arg->set1_size[tid], arg->ub[tid]);
size_psum[wid][tid] = arg->set1_size[tid];
end_pos[wid][tid] = 0;
}
else {
size_psum[wid][tid] = 0;
}
__syncwarp();
prefix_sum(&size_psum[wid][0], arg->num_sets);
__syncwarp();
bool still_loop = true;
int slot_idx = 0;
int offset = 0;
int predicate;
for (int idx = tid; (idx < ((size_psum[wid][WARP_SIZE] > 0) ? (((size_psum[wid][WARP_SIZE] - 1) / WARP_SIZE + 1) * WARP_SIZE) : 0) && still_loop); idx += WARP_SIZE) {
predicate = 0;
if (idx < size_psum[wid][WARP_SIZE]) {
while (idx >= size_psum[wid][slot_idx + 1]) {
slot_idx++;
}
offset = idx - size_psum[wid][slot_idx];
bitarray32 lb = arg->g->vertex_label[arg->set1[slot_idx][offset]];
predicate = ((lb & arg->label) == lb) && (DIFF ^ bsearch_exist(arg->set2[slot_idx], arg->set2_size[slot_idx], arg->set1[slot_idx][offset]));
}
else {
slot_idx = arg->num_sets;
still_loop = false;
}
still_loop = __shfl_sync(0xFFFFFFFF, still_loop, 31);
predicate = __ballot_sync(0xFFFFFFFF, predicate);
bool cond = (arg->level < arg->pat->nnodes - 2 && predicate & (1 << tid));
graph_node_t res_tmp;
if (cond) {
res_tmp = arg->set1[slot_idx][offset];
}
int prev_idx = ((idx / WARP_SIZE == size_psum[wid][slot_idx] / WARP_SIZE) ? size_psum[wid][slot_idx] % WARP_SIZE : 0);
if (cond) {
arg->res[slot_idx][end_pos[wid][slot_idx] + __popc(predicate & ((1 << tid) - (1 << prev_idx)))] = res_tmp;
}
if (slot_idx < __shfl_down_sync(0xFFFFFFFF, slot_idx, 1)) {
end_pos[wid][slot_idx] += __popc(predicate & ((1 << (tid + 1)) - (1 << prev_idx)));
}
else if (tid == WARP_SIZE - 1 && slot_idx < arg->num_sets) {
end_pos[wid][slot_idx] += __popc(predicate & (0xFFFFFFFF - (1 << prev_idx) + 1));
}
}
__syncwarp();
if (tid < arg->num_sets) {
*(arg->res_size[tid]) = end_pos[wid][tid];
}
__syncwarp();
}
__forceinline__ __device__ void get_job(JobQueue* q, graph_node_t& cur_pos, graph_node_t& njobs) {
lock(&(q->mutex));
cur_pos = q->cur;
q->cur += JOB_CHUNK_SIZE;
if (q->cur > q->length) q->cur = q->length;
njobs = q->cur - cur_pos;
unlock(&(q->mutex));
}
__device__ void extend(Graph* g, Pattern* pat, CallStack* stk, JobQueue* q, pattern_node_t level) {
__shared__ Arg_t arg[NWARPS_PER_BLOCK];
int wid = threadIdx.x / WARP_SIZE;
if (level == 0) {
graph_node_t cur_job, njobs;
// TODO: change to warp
for (int k = 0; k < UNROLL_SIZE(level); k++) {
if (threadIdx.x % WARP_SIZE == 0) {
get_job(q, cur_job, njobs);
for (size_t i = 0; i < njobs; i++) {
for (int j = 0; j < 2; j++) {
stk->slot_storage[0][k][i + JOB_CHUNK_SIZE * j] = (q->q[cur_job + i].nodes)[j];
}
}
stk->slot_size[0][k] = njobs;
}
__syncwarp();
}
}
else {
arg[wid].g = g;
arg[wid].num_sets = UNROLL_SIZE(level);
int remaining = stk->slot_size[pat->rowptr[level - 1]][stk->uiter[level - 1]] - stk->iter[level - 1];
if (remaining >= 0 && UNROLL_SIZE(level) > remaining) {
arg[wid].num_sets = remaining;
}
for (int i = pat->rowptr[level]; i < pat->rowptr[level + 1]; i++) {
// compute ub based on pattern->partial
if (!LABELED) {
graph_node_t ub = ((i == pat->rowptr[level]) ? INT_MAX : -1);
if (pat->partial[i] != 0) {
// compute ub with nodes after start_level until previous level
for (pattern_node_t k = 1; k < level - 1; k++) {
if ((pat->partial[i] & (1 << (k + 1))) && ((i == pat->rowptr[level]) ^ (ub < path(stk, pat, k, stk->uiter[k + 1])))) ub = path(stk, pat, k, stk->uiter[k + 1]);
}
// compute ub with nodes in the previous level
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = ub;
int prev_level = (level > 1 ? 2 : 1);
int prev_iter = (level > 1 ? stk->uiter[1] : k);
// compute ub with the first few nodes before start_level
for (pattern_node_t j = 0; j < prev_level; j++) {
if ((pat->partial[i] & (1 << j)) && ((i == pat->rowptr[level]) ^ (arg[wid].ub[k] < path(stk, pat, j - 1, prev_iter)))) arg[wid].ub[k] = path(stk, pat, j - 1, prev_iter);
}
if ((pat->partial[i] & (1 << level)) && ((i == pat->rowptr[level]) ^ (arg[wid].ub[k] < path(stk, pat, level - 1, k)))) arg[wid].ub[k] = path(stk, pat, level - 1, k);
if (arg[wid].ub[k] == -1) arg[wid].ub[k] = INT_MAX;
}
}
else {
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = INT_MAX;
}
}
}
else {
for (pattern_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].ub[k] = INT_MAX;
}
}
arg[wid].label = pat->slot_labels[i];
if (pat->set_ops[i] & 0x20) {
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].set2[k] = NULL;
arg[wid].set2_size[k] = 0;
if (!EDGE_INDUCED) {
graph_node_t t = path(stk, pat, level - 2, ((level > 1) ? stk->uiter[level - 1] : k));
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
graph_node_t t = path(stk, pat, level - 1, k);
arg[wid].set1[k] = &g->colidx[g->rowptr[t]];
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set1_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
// arg[wid].cached = (level > 1);
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
if(EDGE_INDUCED && !LABELED){
for (int j = level-1; j >= -1; j--)
{
int unrollIdx = threadIdx.x % WARP_SIZE;
if(unrollIdx < arg[wid].num_sets)
{
if(level==1){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, unrollIdx);
}
else{
if(j==level-1){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, unrollIdx);
arg[wid].set2_size[unrollIdx] = 1;
}
else{
if(j>0){
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, stk->uiter[j + 1]);
}
else{
arg[wid].set2[unrollIdx] = path_address(stk, pat, j, stk->uiter[1]);
}
}
}
arg[wid].set1[unrollIdx] = &(stk->slot_storage[i][unrollIdx][0]);
arg[wid].res[unrollIdx] = &(stk->slot_storage[i][unrollIdx][0]);
arg[wid].set1_size[unrollIdx] = stk->slot_size[i][unrollIdx];
arg[wid].res_size[unrollIdx] = &(stk->slot_size[i][unrollIdx]);
arg[wid].set2_size[unrollIdx] = 1;
arg[wid].level = level;
arg[wid].pat = pat;
}
__syncwarp();
compute_set<true>(&arg[wid]);
}
}
if (!EDGE_INDUCED) {
for (pattern_node_t j = level - 3; j >= -1; j--) {
graph_node_t t = path(stk, pat, j, stk->uiter[(j > 0 ? j + 1 : 1)]);
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
arg[wid].set1[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].set1_size[k] = stk->slot_size[i][k];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = true;
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
}
}
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
else {
pattern_node_t slot_idx = (pat->set_ops[i] & 0x1F);
if (pat->set_ops[i] & 0x40) { //INTE
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
graph_node_t t = path(stk, pat, level - 1, k);
graph_node_t* neighbor = &g->colidx[g->rowptr[t]];
graph_node_t neighbor_size = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
if (level > 1) {
arg[wid].set2[k] = &(stk->slot_storage[slot_idx][stk->uiter[level - 1]][0]);
arg[wid].set2_size[k] = stk->slot_size[slot_idx][stk->uiter[level - 1]];
}
else {
graph_node_t t = path(stk, pat, -1, k);
arg[wid].set2[k] = &g->colidx[g->rowptr[t]];
arg[wid].set2_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
arg[wid].set1[k] = neighbor;
arg[wid].set1_size[k] = neighbor_size;
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = (level > 1);
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<false>(&arg[wid]);
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
else { //DIFF
for (graph_node_t k = 0; k < arg[wid].num_sets; k++) {
graph_node_t* neighbor = NULL;
graph_node_t neighbor_size = 0;
if(EDGE_INDUCED && !LABELED){
neighbor = path_address(stk, pat, level - 1, k);
neighbor_size = 1;
}
if (!EDGE_INDUCED) {
graph_node_t t = path(stk, pat, level - 1, k);
neighbor = &g->colidx[g->rowptr[t]];
neighbor_size = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
if (level > 1) {
arg[wid].set1[k] = &(stk->slot_storage[slot_idx][stk->uiter[level - 1]][0]);
arg[wid].set1_size[k] = stk->slot_size[slot_idx][stk->uiter[level - 1]];
}
else {
graph_node_t t = path(stk, pat, -1, k);
arg[wid].set1[k] = &g->colidx[g->rowptr[t]];
arg[wid].set1_size[k] = (graph_node_t)(g->rowptr[t + 1] - g->rowptr[t]);
}
arg[wid].set2[k] = neighbor;
arg[wid].set2_size[k] = neighbor_size;
arg[wid].res[k] = &(stk->slot_storage[i][k][0]);
arg[wid].res_size[k] = &(stk->slot_size[i][k]);
}
//arg[wid].cached = false;
arg[wid].level = level;
arg[wid].pat = pat;
compute_set<true>(&arg[wid]);
for (graph_node_t k = arg[wid].num_sets; k < UNROLL_SIZE(level); k++) stk->slot_size[i][k] = 0;
}
}
}
}
stk->iter[level] = 0;
stk->uiter[level] = 0;
}
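// Cross-block work donation: when this warp sits at a shallow level (<= DETECT_LEVEL)
// and a lower level still has unprocessed iterations, lane 0 copies that stack layer
// into the call stack of a fully idle block and wakes it up.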
__forceinline__ __device__ void respond_across_block(int level, CallStack* stk, Pattern* pat, StealingArgs* _stealing_args) {
if (level > 0 && level <= DETECT_LEVEL) {
if (threadIdx.x % WARP_SIZE == 0) {
int at_level = -1;
int left_task = 0;
for (int l = 0; l < level; l++) {
left_task = stk->slot_size[pat->rowptr[l]][stk->uiter[l]] - stk->iter[l] - stk->uiter[l + 1] - 1;
if (left_task > 0) {
at_level = l;
break;
}
}
if (at_level != -1) {
for (int b = 0; b < GRID_DIM; b++) {
if (b == blockIdx.x) continue;
if (atomicCAS(&(_stealing_args->global_mutex[b]), 0, 1) == 0) {
if (atomicAdd(&_stealing_args->idle_warps[b], 0) == 0xFFFFFFFF) {
__threadfence();
trans_layer(*stk, _stealing_args->global_callstack[b * NWARPS_PER_BLOCK], pat, at_level, INT_MAX);
__threadfence();
atomicSub(_stealing_args->idle_warps_count, NWARPS_PER_BLOCK);
atomicExch(&_stealing_args->idle_warps[b], 0);
atomicExch(&(_stealing_args->global_mutex[b]), 0);
break;
}
atomicExch(&(_stealing_args->global_mutex[b]), 0);
}
}
}
}
__syncwarp();
}
}
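// Per-warp matching loop: extend the candidate set for the current pattern level,
// descend while candidates remain, backtrack otherwise, and accumulate the match
// count at the last level. The per-warp mutex keeps the stack consistent for stealers.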
__device__ void match(Graph* g, Pattern* pat,
CallStack* stk, JobQueue* q, size_t* count, StealingArgs* _stealing_args) {
pattern_node_t& level = stk->level;
while (true) {
if (threadIdx.x % WARP_SIZE == 0) {
lock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
}
__syncwarp();
if (level < pat->nnodes - 2) {
if (STEAL_ACROSS_BLOCK) {
respond_across_block(level, stk, pat, _stealing_args);
}
if (stk->uiter[level] == 0 && stk->slot_size[pat->rowptr[level]][0] == 0) {
extend(g, pat, stk, q, level);
if (level == 0 && stk->slot_size[0][0] == 0) {
if (threadIdx.x % WARP_SIZE == 0)
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
__syncwarp();
break;
}
}
if (stk->uiter[level] < UNROLL_SIZE(level)) {
if (stk->iter[level] < stk->slot_size[pat->rowptr[level]][stk->uiter[level]]) {
if (threadIdx.x % WARP_SIZE == 0)
level++;
__syncwarp();
}
else {
stk->slot_size[pat->rowptr[level]][stk->uiter[level]] = 0;
stk->iter[level] = 0;
if (threadIdx.x % WARP_SIZE == 0)
stk->uiter[level]++;
__syncwarp();
}
}
else {
stk->uiter[level] = 0;
if (level > 0) {
if (threadIdx.x % WARP_SIZE == 0)
level--;
if (threadIdx.x % WARP_SIZE == 0)
stk->iter[level] += UNROLL_SIZE(level + 1);
__syncwarp();
}
}
}
else if (level == pat->nnodes - 2) {
extend(g, pat, stk, q, level);
for (int j = 0; j < UNROLL_SIZE(level); j++) {
if (threadIdx.x % WARP_SIZE == 0) {
*count += stk->slot_size[pat->rowptr[level]][j];
}
__syncwarp();
stk->slot_size[pat->rowptr[level]][j] = 0;
}
stk->uiter[level] = 0;
if (threadIdx.x % WARP_SIZE == 0)
level--;
if (threadIdx.x % WARP_SIZE == 0)
stk->iter[level] += UNROLL_SIZE(level + 1);
__syncwarp();
}
//__syncwarp();
if (threadIdx.x % WARP_SIZE == 0)
unlock(&(_stealing_args->local_mutex[threadIdx.x / WARP_SIZE]));
__syncwarp();
}
}
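// Kernel entry point: each warp walks its own call stack; a warp that runs dry first
// tries to steal inside its block (trans_skt) and then parks as idle, waiting for
// another block to donate work through the global call stacks.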
__global__ void _parallel_match(Graph* dev_graph, Pattern* dev_pattern,
CallStack* dev_callstack, JobQueue* job_queue, size_t* res,
int* idle_warps, int* idle_warps_count, int* global_mutex) {
__shared__ Graph graph;
__shared__ Pattern pat;
__shared__ CallStack stk[NWARPS_PER_BLOCK];
__shared__ size_t count[NWARPS_PER_BLOCK];
__shared__ bool stealed[NWARPS_PER_BLOCK];
__shared__ int mutex_this_block[NWARPS_PER_BLOCK];
__shared__ StealingArgs stealing_args;
stealing_args.idle_warps = idle_warps;
stealing_args.idle_warps_count = idle_warps_count;
stealing_args.global_mutex = global_mutex;
stealing_args.local_mutex = mutex_this_block;
stealing_args.global_callstack = dev_callstack;
int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
int global_wid = global_tid / WARP_SIZE;
int local_wid = threadIdx.x / WARP_SIZE;
if (threadIdx.x == 0) {
graph = *dev_graph;
pat = *dev_pattern;
}
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
stk[local_wid] = dev_callstack[global_wid];
}
__syncwarp();
auto start = clock64();
while (true) {
match(&graph, &pat, &stk[local_wid], job_queue, &count[local_wid], &stealing_args);
__syncwarp();
stealed[local_wid] = false;
if (STEAL_IN_BLOCK) {
if (threadIdx.x % WARP_SIZE == 0) {
stealed[local_wid] = trans_skt(stk, &stk[local_wid], &pat, &stealing_args);
}
__syncwarp();
}
if (STEAL_ACROSS_BLOCK) {
if (!stealed[local_wid]) {
__syncthreads();
if (threadIdx.x % WARP_SIZE == 0) {
atomicAdd(stealing_args.idle_warps_count, 1);
lock(&(stealing_args.global_mutex[blockIdx.x]));
atomicOr(&stealing_args.idle_warps[blockIdx.x], (1 << local_wid));
unlock(&(stealing_args.global_mutex[blockIdx.x]));
while ((atomicAdd(stealing_args.idle_warps_count, 0) < NWARPS_TOTAL) && (atomicAdd(&stealing_args.idle_warps[blockIdx.x], 0) & (1 << local_wid)));
if (atomicAdd(stealing_args.idle_warps_count, 0) < NWARPS_TOTAL) {
__threadfence();
if (local_wid == 0) {
stk[local_wid] = (stealing_args.global_callstack[blockIdx.x * NWARPS_PER_BLOCK]);
}
stealed[local_wid] = true;
}
else {
stealed[local_wid] = false;
}
}
__syncthreads();
}
}
if (!stealed[local_wid]) {
break;
}
}
auto stop = clock64();
if (threadIdx.x % WARP_SIZE == 0) {
res[global_wid] = count[local_wid];
// printf("%d\t%ld\t%d\t%d\n", blockIdx.x, stop - start, stealed[local_wid], local_wid);
//printf("%ld\n", stop - start);
}
// if(threadIdx.x % WARP_SIZE == 0)
// printf("%d\t%d\t%d\n", blockIdx.x, local_wid, mutex_this_block[local_wid]);
}
}
|
a48235b7a29e7df250014c01413e3cf31e4324a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// %(type)s and %(nneu)d must be replaced using Python string formatting
#define NNEU %(nneu)d
__global__ void leaky_iaf(
int neu_num,
%(type)s dt,
int *spk,
%(type)s *V,
%(type)s *I,
%(type)s *Vt,
%(type)s *Vr,
%(type)s *R,
%(type)s *C)
{
int bid = blockIdx.x;
int nid = bid * NNEU + threadIdx.x;
%(type)s v,i,r,c;
if( nid < neu_num ){
v = V[nid];
i = I[nid];
r = R[nid];
c = C[nid];
// update v
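        // closed-form (exponential Euler) solution of the leaky integrator over one step dt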
%(type)s bh = exp( -dt/r/c );
v = v*bh + r*i*(1.0-bh);
// spike detection
spk[nid] = 0;
if( v >= Vt[nid] ){
v = Vr[nid];
spk[nid] = 1;
}
V[nid] = v;
}
return;
}
| a48235b7a29e7df250014c01413e3cf31e4324a6.cu | // %(type)s and %(nneu)d must be replaced using Python string formatting
#define NNEU %(nneu)d
__global__ void leaky_iaf(
int neu_num,
%(type)s dt,
int *spk,
%(type)s *V,
%(type)s *I,
%(type)s *Vt,
%(type)s *Vr,
%(type)s *R,
%(type)s *C)
{
int bid = blockIdx.x;
int nid = bid * NNEU + threadIdx.x;
%(type)s v,i,r,c;
if( nid < neu_num ){
v = V[nid];
i = I[nid];
r = R[nid];
c = C[nid];
// update v
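        // closed-form (exponential Euler) solution of the leaky integrator over one step dt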
%(type)s bh = exp( -dt/r/c );
v = v*bh + r*i*(1.0-bh);
// spike detection
spk[nid] = 0;
if( v >= Vt[nid] ){
v = Vr[nid];
spk[nid] = 1;
}
V[nid] = v;
}
return;
}
|
7e3dd4ef81a9de5172d27f05b526afb0d38830b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
// Adding a bunch of constants for testing
#define BIN_COUNT (HISTO_WIDTH * HISTO_HEIGHT)
#define INPUT_SIZE (INPUT_WIDTH * INPUT_HEIGHT)
#define WARP_COUNT 6
#define THREADBLOCK_MEMORY (WARP_COUNT * BIN_COUNT)
__global__ void HistogramSharedCoalesced(uint32_t* d_input, uint8_t* d_bins);
__global__ void HistogramShared(uint32_t* d_input, uint8_t* d_bins);
__global__ void HistogramFastest(uint32_t* d_input, uint8_t* d_bins);
void opt_2dhisto(uint32_t* d_input, uint8_t* d_bins)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
hipMemset(d_bins, 0, BIN_COUNT * sizeof(uint8_t));
// Working shared histogram with coalesced access: GPU TIME = 4.224s
//dim3 sharedCoalescedDimGrid(13,1);
//dim3 sharedCoalescedDimBlock(257,1);
//HistogramSharedCoalesced<<<sharedCoalescedDimGrid, sharedCoalescedDimBlock>>>(d_input, d_bins);
// Working shared histogram: GPU TIME = 8.614s
//dim3 sharedDimGrid(15,1);
//dim3 sharedDimBlock(297,1);
//HistogramShared<<<sharedDimGrid, sharedDimBlock>>>(d_input, d_bins);
    // Working histogram values, don't edit: GPU TIME = 3.569s
dim3 fastestDimGrid(15,1);
dim3 fastestDimBlock(257,1);
hipLaunchKernelGGL(( HistogramFastest), dim3(fastestDimGrid), dim3(fastestDimBlock), 0, 0, d_input, d_bins);
}
// Custom function to atomicIncrement with uint8_t input
__device__ void atomicIncCustom(uint8_t* addr)
{
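    // locate the byte inside its aligned 32-bit word, then retry atomicCAS on the
    // whole word until the increment lands, saturating the count at 255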
size_t addr_offset = (size_t)addr & 0x3;
size_t bits_shift = addr_offset * 8;
size_t mask = 0xff << bits_shift;
unsigned int* aligned_addr = (unsigned int *)(addr - addr_offset);
unsigned int old = *aligned_addr;
unsigned int stored;
unsigned int new_value;
do{
stored = old;
new_value = (stored >> bits_shift) & 0xff;
if(new_value < 255) {
new_value++;
} else {
return;
}
new_value = (new_value << bits_shift) | (stored & ~mask);
old = atomicCAS(aligned_addr, stored, new_value);
} while (stored != old);
}
// Custom function to perform atomicAdd with uint8_t input
__device__ void atomicAddCustom(uint8_t* addr, unsigned int val)
{
if (val == 0) return;
    //Need the 4 byte aligned address containing this uint8_t
size_t addr_offset = (size_t)addr & 0x3;
size_t bits_shift = addr_offset * 8;
size_t mask = 0xff << bits_shift;
unsigned int* aligned_addr = (unsigned int *)(addr - addr_offset);
unsigned int old = *aligned_addr;
unsigned int stored;
unsigned int new_value;
do{
stored = old;
new_value = (stored >> bits_shift) & 0xff;
if(new_value == 255) {
return;
} else {
new_value += val;
if (new_value > 255) new_value = 255;
}
new_value = (new_value << bits_shift) | (stored & ~mask);
old = atomicCAS(aligned_addr, stored, new_value);
} while (stored != old);
}
// Trying new shared solution
__global__ void HistogramSharedCoalesced(uint32_t* d_input, uint8_t* d_bins)
{
// Shared partial histogram computation (using coalesced access)
const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
__shared__ uint8_t s_Hist[THREADBLOCK_MEMORY];
const int elsPerThread = INPUT_SIZE / numThreads;
uint32_t idx;
#pragma unroll
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
s_Hist[i] = 0;
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elsPerThread; i++) {
idx = globalTid * elsPerThread + i;
atomicIncCustom(&s_Hist[d_input[idx]]);
}
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
atomicAddCustom(&d_bins[i],s_Hist[i]);
}
}
// Best Shared Solution So Far
__global__ void HistogramShared(uint32_t* d_input, uint8_t* d_bins)
{
// Shared partial histogram computation (based on slides)
const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
__shared__ uint8_t s_Hist[THREADBLOCK_MEMORY];
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
s_Hist[i] = 0;
}
__syncthreads();
for (int i = globalTid; i < INPUT_SIZE; i += numThreads) {
atomicIncCustom(&s_Hist[d_input[i]]);
}
__syncthreads();
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
atomicAddCustom(&d_bins[i],s_Hist[i]);
}
}
// Best Solution So Far
__global__ void HistogramFastest(uint32_t* d_input, uint8_t* d_bins)
{
// Computing the histogram using strided access to the global array
int globalTid = threadIdx.x + blockIdx.x * blockDim.x;
int numThreads = blockDim.x * gridDim.x;
// start at the global id of the thread and increment by the total number of threads
// until all of the input has been computed
for (int i = globalTid; i < INPUT_SIZE; i += numThreads) {
atomicIncCustom(&d_bins[d_input[i]]);
}
}
/* Include below the implementation of any other functions you need */
// Allocate memory on GPU for arrays and copy arrays to GPU
void opt_2dhisto_setup(uint32_t*& d_input, uint32_t** input, uint8_t*& d_bins, uint8_t* kernel_bins) {
// allocate memory on GPU
hipMalloc((void**)&d_input, INPUT_WIDTH * INPUT_HEIGHT * sizeof(uint32_t));
hipMalloc((void**)&d_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t));
// copy arrays from CPU to GPU
uint32_t* d_pointer = d_input;
for (int i=0; i<INPUT_HEIGHT; i++) {
hipMemcpy(d_pointer, input[i], INPUT_WIDTH * sizeof(uint32_t), hipMemcpyHostToDevice);
d_pointer += INPUT_WIDTH;
}
hipMemcpy(d_bins, kernel_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t), hipMemcpyHostToDevice);
}
// Copy bins from GPU to CPU and free memory on GPU
void opt_2dhisto_teardown(uint32_t*& d_input, uint8_t*& d_bins, uint8_t* kernel_bins) {
hipMemcpy(kernel_bins, d_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t), hipMemcpyDeviceToHost);
hipFree(d_input);
hipFree(d_bins);
}
| 7e3dd4ef81a9de5172d27f05b526afb0d38830b6.cu | #include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <cutil.h>
#include "util.h"
#include "ref_2dhisto.h"
// Adding a bunch of constants for testing
#define BIN_COUNT (HISTO_WIDTH * HISTO_HEIGHT)
#define INPUT_SIZE (INPUT_WIDTH * INPUT_HEIGHT)
#define WARP_COUNT 6
#define THREADBLOCK_MEMORY (WARP_COUNT * BIN_COUNT)
__global__ void HistogramSharedCoalesced(uint32_t* d_input, uint8_t* d_bins);
__global__ void HistogramShared(uint32_t* d_input, uint8_t* d_bins);
__global__ void HistogramFastest(uint32_t* d_input, uint8_t* d_bins);
void opt_2dhisto(uint32_t* d_input, uint8_t* d_bins)
{
/* This function should only contain a call to the GPU
histogramming kernel. Any memory allocations and
transfers must be done outside this function */
cudaMemset(d_bins, 0, BIN_COUNT * sizeof(uint8_t));
// Working shared histogram with coalesced access: GPU TIME = 4.224s
//dim3 sharedCoalescedDimGrid(13,1);
//dim3 sharedCoalescedDimBlock(257,1);
//HistogramSharedCoalesced<<<sharedCoalescedDimGrid, sharedCoalescedDimBlock>>>(d_input, d_bins);
// Working shared histogram: GPU TIME = 8.614s
//dim3 sharedDimGrid(15,1);
//dim3 sharedDimBlock(297,1);
//HistogramShared<<<sharedDimGrid, sharedDimBlock>>>(d_input, d_bins);
    // Working histogram values, don't edit: GPU TIME = 3.569s
dim3 fastestDimGrid(15,1);
dim3 fastestDimBlock(257,1);
HistogramFastest<<<fastestDimGrid, fastestDimBlock>>>(d_input, d_bins);
}
// Custom function to atomicIncrement with uint8_t input
__device__ void atomicIncCustom(uint8_t* addr)
{
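    // locate the byte inside its aligned 32-bit word, then retry atomicCAS on the
    // whole word until the increment lands, saturating the count at 255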
size_t addr_offset = (size_t)addr & 0x3;
size_t bits_shift = addr_offset * 8;
size_t mask = 0xff << bits_shift;
unsigned int* aligned_addr = (unsigned int *)(addr - addr_offset);
unsigned int old = *aligned_addr;
unsigned int stored;
unsigned int new_value;
do{
stored = old;
new_value = (stored >> bits_shift) & 0xff;
if(new_value < 255) {
new_value++;
} else {
return;
}
new_value = (new_value << bits_shift) | (stored & ~mask);
old = atomicCAS(aligned_addr, stored, new_value);
} while (stored != old);
}
// Custom function to perform atomicAdd with uint8_t input
__device__ void atomicAddCustom(uint8_t* addr, unsigned int val)
{
if (val == 0) return;
    //Need the 4 byte aligned address containing this uint8_t
size_t addr_offset = (size_t)addr & 0x3;
size_t bits_shift = addr_offset * 8;
size_t mask = 0xff << bits_shift;
unsigned int* aligned_addr = (unsigned int *)(addr - addr_offset);
unsigned int old = *aligned_addr;
unsigned int stored;
unsigned int new_value;
do{
stored = old;
new_value = (stored >> bits_shift) & 0xff;
if(new_value == 255) {
return;
} else {
new_value += val;
if (new_value > 255) new_value = 255;
}
new_value = (new_value << bits_shift) | (stored & ~mask);
old = atomicCAS(aligned_addr, stored, new_value);
} while (stored != old);
}
// Trying new shared solution
__global__ void HistogramSharedCoalesced(uint32_t* d_input, uint8_t* d_bins)
{
// Shared partial histogram computation (using coalesced access)
const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
__shared__ uint8_t s_Hist[THREADBLOCK_MEMORY];
const int elsPerThread = INPUT_SIZE / numThreads;
uint32_t idx;
#pragma unroll
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
s_Hist[i] = 0;
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elsPerThread; i++) {
idx = globalTid * elsPerThread + i;
atomicIncCustom(&s_Hist[d_input[idx]]);
}
__syncthreads();
#pragma unroll
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
atomicAddCustom(&d_bins[i],s_Hist[i]);
}
}
// Best Shared Solution So Far
__global__ void HistogramShared(uint32_t* d_input, uint8_t* d_bins)
{
// Shared partial histogram computation (based on slides)
const int globalTid = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
__shared__ uint8_t s_Hist[THREADBLOCK_MEMORY];
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
s_Hist[i] = 0;
}
__syncthreads();
for (int i = globalTid; i < INPUT_SIZE; i += numThreads) {
atomicIncCustom(&s_Hist[d_input[i]]);
}
__syncthreads();
for (int i = threadIdx.x; i < BIN_COUNT; i += blockDim.x) {
atomicAddCustom(&d_bins[i],s_Hist[i]);
}
}
// Best Solution So Far
__global__ void HistogramFastest(uint32_t* d_input, uint8_t* d_bins)
{
// Computing the histogram using strided access to the global array
int globalTid = threadIdx.x + blockIdx.x * blockDim.x;
int numThreads = blockDim.x * gridDim.x;
// start at the global id of the thread and increment by the total number of threads
// until all of the input has been computed
for (int i = globalTid; i < INPUT_SIZE; i += numThreads) {
atomicIncCustom(&d_bins[d_input[i]]);
}
}
/* Include below the implementation of any other functions you need */
// Allocate memory on GPU for arrays and copy arrays to GPU
void opt_2dhisto_setup(uint32_t*& d_input, uint32_t** input, uint8_t*& d_bins, uint8_t* kernel_bins) {
// allocate memory on GPU
cudaMalloc((void**)&d_input, INPUT_WIDTH * INPUT_HEIGHT * sizeof(uint32_t));
cudaMalloc((void**)&d_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t));
// copy arrays from CPU to GPU
uint32_t* d_pointer = d_input;
for (int i=0; i<INPUT_HEIGHT; i++) {
cudaMemcpy(d_pointer, input[i], INPUT_WIDTH * sizeof(uint32_t), cudaMemcpyHostToDevice);
d_pointer += INPUT_WIDTH;
}
cudaMemcpy(d_bins, kernel_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t), cudaMemcpyHostToDevice);
}
// Copy bins from GPU to CPU and free memory on GPU
void opt_2dhisto_teardown(uint32_t*& d_input, uint8_t*& d_bins, uint8_t* kernel_bins) {
cudaMemcpy(kernel_bins, d_bins, HISTO_WIDTH * HISTO_HEIGHT * sizeof(uint8_t), cudaMemcpyDeviceToHost);
cudaFree(d_input);
cudaFree(d_bins);
}
|
51460813e99976cdd345a82e12cd38d17048fbc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_BLOCK_DIM 8
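// radius-4 (25-point) finite-difference Laplacian plus second-order-in-time update
// for the interior of the domain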
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k = z3 + blockIdx.x * blockDim.x + threadIdx.x;
const llint j = y3 + blockIdx.y * blockDim.y + threadIdx.y;
const llint i = x3 + blockIdx.z * blockDim.z + threadIdx.z;
if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)],
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
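// same radius-4 stencil for the boundary region: the eta damping profile and the
// auxiliary field phi implement the absorbing (PML-style) update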
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k = blockIdx.x * blockDim.x + threadIdx.x;
const llint j = blockIdx.y * blockDim.y + threadIdx.y;
const llint i = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= x3 && i < x4 && j >= y3 && j < y4 && k >= z3 && k < z4) { return; }
if (i > nx-1 || j > ny-1 || k > nz-1) { return; }
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
const float s_eta_c = eta[IDX3_eta1(i,j,k)];
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, s_eta_c,
__fsub_rn(2.f,
__fmul_rn(s_eta_c, s_eta_c)
)
),
u[IDX3_l(i,j,k)],
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, s_eta_c, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
__fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)])
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
),
hdz_2)
))
)
,
__fadd_rn(1.f, s_eta_c)
);
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
hipMalloc(&d_u, sizeof(float) * size_u);
hipMalloc(&d_v, sizeof(float) * size_v);
hipMalloc(&d_vp, sizeof(float) * size_vp);
hipMalloc(&d_phi, sizeof(float) * size_phi);
hipMalloc(&d_eta, sizeof(float) * size_eta);
hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice);
hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice);
hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice);
dim3 threadsPerBlock(N_THREADS_PER_BLOCK_DIM, N_THREADS_PER_BLOCK_DIM, N_THREADS_PER_BLOCK_DIM);
int num_streams = 2;
hipStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreate(&(streams[i]));
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_inner(
(z4-z3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(y4-y3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(x4-x3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM);
hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_inner), dim3(threadsPerBlock), 0, streams[0],
nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_pml(
(nz+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(nx+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(ny+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_pml), dim3(threadsPerBlock), 0, streams[1],
nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
hipStreamSynchronize(streams[i]);
}
hipLaunchKernelGGL(( kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(streams[i]);
}
hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_v);
hipFree(d_vp);
hipFree(d_phi);
hipFree(d_eta);
}
| 51460813e99976cdd345a82e12cd38d17048fbc7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_BLOCK_DIM 8
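// radius-4 (25-point) finite-difference Laplacian plus second-order-in-time update
// for the interior of the domain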
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k = z3 + blockIdx.x * blockDim.x + threadIdx.x;
const llint j = y3 + blockIdx.y * blockDim.y + threadIdx.y;
const llint i = x3 + blockIdx.z * blockDim.z + threadIdx.z;
if (i > x4-1 || j > y4-1 || k > z4-1) { return; }
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, u[IDX3_l(i,j,k)],
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
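// same radius-4 stencil for the boundary region: the eta damping profile and the
// auxiliary field phi implement the absorbing (PML-style) update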
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const llint k = blockIdx.x * blockDim.x + threadIdx.x;
const llint j = blockIdx.y * blockDim.y + threadIdx.y;
const llint i = blockIdx.z * blockDim.z + threadIdx.z;
if (i >= x3 && i < x4 && j >= y3 && j < y4 && k >= z3 && k < z4) { return; }
if (i > nx-1 || j > ny-1 || k > nz-1) { return; }
float lap = __fmaf_rn(coef0, u[IDX3_l(i,j,k)]
, __fmaf_rn(coefx_1, __fadd_rn(u[IDX3_l(i+1,j,k)],u[IDX3_l(i-1,j,k)])
, __fmaf_rn(coefy_1, __fadd_rn(u[IDX3_l(i,j+1,k)],u[IDX3_l(i,j-1,k)])
, __fmaf_rn(coefz_1, __fadd_rn(u[IDX3_l(i,j,k+1)],u[IDX3_l(i,j,k-1)])
, __fmaf_rn(coefx_2, __fadd_rn(u[IDX3_l(i+2,j,k)],u[IDX3_l(i-2,j,k)])
, __fmaf_rn(coefy_2, __fadd_rn(u[IDX3_l(i,j+2,k)],u[IDX3_l(i,j-2,k)])
, __fmaf_rn(coefz_2, __fadd_rn(u[IDX3_l(i,j,k+2)],u[IDX3_l(i,j,k-2)])
, __fmaf_rn(coefx_3, __fadd_rn(u[IDX3_l(i+3,j,k)],u[IDX3_l(i-3,j,k)])
, __fmaf_rn(coefy_3, __fadd_rn(u[IDX3_l(i,j+3,k)],u[IDX3_l(i,j-3,k)])
, __fmaf_rn(coefz_3, __fadd_rn(u[IDX3_l(i,j,k+3)],u[IDX3_l(i,j,k-3)])
, __fmaf_rn(coefx_4, __fadd_rn(u[IDX3_l(i+4,j,k)],u[IDX3_l(i-4,j,k)])
, __fmaf_rn(coefy_4, __fadd_rn(u[IDX3_l(i,j+4,k)],u[IDX3_l(i,j-4,k)])
, __fmul_rn(coefz_4, __fadd_rn(u[IDX3_l(i,j,k+4)],u[IDX3_l(i,j,k-4)])
)))))))))))));
const float s_eta_c = eta[IDX3_eta1(i,j,k)];
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, s_eta_c,
__fsub_rn(2.f,
__fmul_rn(s_eta_c, s_eta_c)
)
),
u[IDX3_l(i,j,k)],
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, s_eta_c, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
__fsub_rn(u[IDX3_l(i+1,j,k)], u[IDX3_l(i-1,j,k)])
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(u[IDX3_l(i,j+1,k)], u[IDX3_l(i,j-1,k)])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(u[IDX3_l(i,j,k+1)], u[IDX3_l(i,j,k-1)])
),
hdz_2)
))
)
,
__fadd_rn(1.f, s_eta_c)
);
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
cudaMalloc(&d_u, sizeof(float) * size_u);
cudaMalloc(&d_v, sizeof(float) * size_v);
cudaMalloc(&d_vp, sizeof(float) * size_vp);
cudaMalloc(&d_phi, sizeof(float) * size_phi);
cudaMalloc(&d_eta, sizeof(float) * size_eta);
cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice);
cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice);
cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice);
cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice);
dim3 threadsPerBlock(N_THREADS_PER_BLOCK_DIM, N_THREADS_PER_BLOCK_DIM, N_THREADS_PER_BLOCK_DIM);
int num_streams = 2;
cudaStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreate(&(streams[i]));
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_inner(
(z4-z3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(y4-y3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(x4-x3+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM);
target_inner_3d_kernel<<<n_block_inner, threadsPerBlock, 0, streams[0]>>>(
nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_pml(
(nz+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(nx+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM,
(ny+N_THREADS_PER_BLOCK_DIM-1) / N_THREADS_PER_BLOCK_DIM);
target_pml_3d_kernel<<<n_block_pml, threadsPerBlock, 0, streams[1]>>>(
nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
cudaStreamSynchronize(streams[i]);
}
kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(streams[i]);
}
cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_v);
cudaFree(d_vp);
cudaFree(d_phi);
cudaFree(d_eta);
}
|
1e3b71e4555ab693f3c8f88563e599a10389e68c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
// nvcc -o PasswordCrackingByCudaStatic PasswordCrackingByCudaStatic.cu
__device__ int is_a_match(char *check) {
char password1[]="AN9810";
char password2[]="JI2205";
char password3[]="TM5298";
char password4[]="UN6085";
char *c1 = check;
char *c2 = check;
char *c3 = check;
char *c4 = check;
char *pw1 = password1;
char *pw2 = password2;
char *pw3 = password3;
char *pw4 = password4;
while(*c1 == *pw1){
if(*c1 == '\0'){
return 1;
}
c1++;
pw1++;
}
while(*c2 == *pw2){
if(*c2 == '\0'){
return 1;
}
c2++;
pw2++;
}
while(*c3 == *pw3){
if(*c3 == '\0'){
return 1;
}
c3++;
pw3++;
}
while(*c4 == *pw4){
if(*c4 == '\0'){
return 1;
}
c4++;
pw4++;
}
return 0;
}
__global__ void kernel() {
//char *alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
char alpha[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
//alphabet[27] = '\0';
char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
//numbers[11] = '\0';
char check[7];
check[6] = '\0';
int i, j, k, l;
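    // blockIdx.x fixes the first letter, threadIdx.x the second; the nested loops
    // enumerate all four digits of the candidate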
for(i=0;i<10;i++){
for(j=0; j<10; j++){
for(k=0; k<10; k++){
for(l=0; l<10; l++){
check[0] = alpha[blockIdx.x];
check[1] = alpha[threadIdx.x];
check[2] = num[i];
check[3] = num[j];
check[4] = num[k];
check[5] = num[l];
if(is_a_match(check)){
printf("Password successfully cracked: %s\n", check);
}
//else {
//printf("tried: %s\n", check);
//}
}
}
}
}
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 )
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
//Calculating time
int main(int argc, char *argv[])
{
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, );
hipDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
| 1e3b71e4555ab693f3c8f88563e599a10389e68c.cu | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
// nvcc -o PasswordCrackingByCudaStatic PasswordCrackingByCudaStatic.cu
__device__ int is_a_match(char *check) {
char password1[]="AN9810";
char password2[]="JI2205";
char password3[]="TM5298";
char password4[]="UN6085";
char *c1 = check;
char *c2 = check;
char *c3 = check;
char *c4 = check;
char *pw1 = password1;
char *pw2 = password2;
char *pw3 = password3;
char *pw4 = password4;
while(*c1 == *pw1){
if(*c1 == '\0'){
return 1;
}
c1++;
pw1++;
}
while(*c2 == *pw2){
if(*c2 == '\0'){
return 1;
}
c2++;
pw2++;
}
while(*c3 == *pw3){
if(*c3 == '\0'){
return 1;
}
c3++;
pw3++;
}
while(*c4 == *pw4){
if(*c4 == '\0'){
return 1;
}
c4++;
pw4++;
}
return 0;
}
__global__ void kernel() {
//char *alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
char alpha[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
//alphabet[27] = '\0';
char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
//numbers[11] = '\0';
char check[7];
check[6] = '\0';
int i, j, k, l;
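    // blockIdx.x fixes the first letter, threadIdx.x the second; the nested loops
    // enumerate all four digits of the candidate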
for(i=0;i<10;i++){
for(j=0; j<10; j++){
for(k=0; k<10; k++){
for(l=0; l<10; l++){
check[0] = alpha[blockIdx.x];
check[1] = alpha[threadIdx.x];
check[2] = num[i];
check[3] = num[j];
check[4] = num[k];
check[5] = num[l];
if(is_a_match(check)){
printf("Password successfully cracked: %s\n", check);
}
//else {
//printf("tried: %s\n", check);
//}
}
}
}
}
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 )
{
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
//Calculating time
int main(int argc, char *argv[])
{
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
kernel <<<26, 26>>>();
cudaThreadSynchronize();
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
3ce1b460e733531b114b22a19ea2046e574e6a4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "HolidaySplitGPU.h"
HolidaySplitGPU::HolidaySplitGPU()
{
}
HolidaySplitGPU::~HolidaySplitGPU()
{
}
int HolidaySplitGPU::Init(Holiday_LayerParameter& inputparam, HolidayNetResource<float> *pNetResource)
{
pNetResourceGpu = (HolidayNetResourceGpu *)pNetResource->pNetResourceGpu;
//bottom_data_size = inputparam.bottom_data_size;
int index = inputparam.bottom_index(0);
bottom_data_size.resize(1);
bottom_data_size[0] = pNetResource->feature_vector_size[index];
top_data_size.resize(inputparam.top_index().size());
for (int i = 0; i < inputparam.top_index().size(); i++)
{
top_data_size[i] = bottom_data_size[0];
}
#ifdef _DEBUG
hipDeviceSynchronize();
printf("Split Init:%s\n", hipGetErrorString(hipGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
int HolidaySplitGPU::Exit()
{
return CUDA_RETURN_VALUE;
}
//int Split_gpu::GetTopSize(std::vector<DataSize>& out_data_size)
//{
// out_data_size = top_data_size;
// return CUDA_RETURN_VALUE;
//}
int HolidaySplitGPU::Process(std::vector<HolidayFeatureMap<float>*> input_data_map, std::vector<HolidayFeatureMap<float>*>& output_data_map)
{
#ifdef _DEBUG
hipEvent_t start1;
hipEventCreate(&start1);
hipEvent_t stop1;
hipEventCreate(&stop1);
hipEventRecord(start1, NULL);
#endif
input_data_map[0]->m_gpu.shape_ = input_data_map[0]->data_shape;
input_data_map[0]->m_gpu.Gpu_DataIn(pNetResourceGpu, input_data_map[0]->dwStorageType, input_data_map[0]->m_cpu.dataMemoryPtr());
input_data_map[0]->dwStorageType = DATA_GPU;
int all_counts = 1;
for (size_t i = 0; i < input_data_map[0]->data_shape.size(); i++)
{
all_counts *= input_data_map[0]->data_shape[i];
}
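    // copy the single bottom blob into every top blob with an asynchronous
    // device-to-device copy on the main stream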
for (int i = 0; i < output_data_map.size(); i++)
{
hipMemcpyAsync(output_data_map[i]->m_gpu.pfData_gpu, (float *)input_data_map[0]->m_gpu.pfData_gpu, all_counts* sizeof(float), hipMemcpyDeviceToDevice, pNetResourceGpu->main_stream);
output_data_map[i]->dwStorageType = DATA_GPU;
output_data_map[i]->data_shape = input_data_map[0]->data_shape;
output_data_map[i]->m_gpu.shape_ = output_data_map[i]->data_shape;
output_data_map[i]->m_gpu.data_size = input_data_map[0]->m_gpu.data_size;
}
#ifdef _DEBUG
hipEventRecord(stop1, NULL);
hipEventSynchronize(stop1);
float msecTotal1 = 0.0f;
hipEventElapsedTime(&msecTotal1, start1, stop1);
printf(" Split: %f ms \n", msecTotal1);
#endif
#ifdef _DEBUG
int dwSize2 = top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3] * top_data_size[0].data_dim[1];
float *pfDataOut = new float[dwSize2];
hipMemcpy(pfDataOut, output_data_map[0]->m_gpu.pfData_gpu, dwSize2 * sizeof(float), hipMemcpyDeviceToHost);
delete[] pfDataOut;
hipDeviceSynchronize();
printf("Split:%s\n", hipGetErrorString(hipGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
| 3ce1b460e733531b114b22a19ea2046e574e6a4f.cu | #include "HolidaySplitGPU.h"
HolidaySplitGPU::HolidaySplitGPU()
{
}
HolidaySplitGPU::~HolidaySplitGPU()
{
}
int HolidaySplitGPU::Init(Holiday_LayerParameter& inputparam, HolidayNetResource<float> *pNetResource)
{
pNetResourceGpu = (HolidayNetResourceGpu *)pNetResource->pNetResourceGpu;
//bottom_data_size = inputparam.bottom_data_size;
int index = inputparam.bottom_index(0);
bottom_data_size.resize(1);
bottom_data_size[0] = pNetResource->feature_vector_size[index];
top_data_size.resize(inputparam.top_index().size());
for (int i = 0; i < inputparam.top_index().size(); i++)
{
top_data_size[i] = bottom_data_size[0];
}
#ifdef _DEBUG
cudaDeviceSynchronize();
printf("Split Init:%s\n", cudaGetErrorString(cudaGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
int HolidaySplitGPU::Exit()
{
return CUDA_RETURN_VALUE;
}
//int Split_gpu::GetTopSize(std::vector<DataSize>& out_data_size)
//{
// out_data_size = top_data_size;
// return CUDA_RETURN_VALUE;
//}
int HolidaySplitGPU::Process(std::vector<HolidayFeatureMap<float>*> input_data_map, std::vector<HolidayFeatureMap<float>*>& output_data_map)
{
#ifdef _DEBUG
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
#endif
input_data_map[0]->m_gpu.shape_ = input_data_map[0]->data_shape;
input_data_map[0]->m_gpu.Gpu_DataIn(pNetResourceGpu, input_data_map[0]->dwStorageType, input_data_map[0]->m_cpu.dataMemoryPtr());
input_data_map[0]->dwStorageType = DATA_GPU;
int all_counts = 1;
for (size_t i = 0; i < input_data_map[0]->data_shape.size(); i++)
{
all_counts *= input_data_map[0]->data_shape[i];
}
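    // copy the single bottom blob into every top blob with an asynchronous
    // device-to-device copy on the main stream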
for (int i = 0; i < output_data_map.size(); i++)
{
cudaMemcpyAsync(output_data_map[i]->m_gpu.pfData_gpu, (float *)input_data_map[0]->m_gpu.pfData_gpu, all_counts* sizeof(float), cudaMemcpyDeviceToDevice, pNetResourceGpu->main_stream);
output_data_map[i]->dwStorageType = DATA_GPU;
output_data_map[i]->data_shape = input_data_map[0]->data_shape;
output_data_map[i]->m_gpu.shape_ = output_data_map[i]->data_shape;
output_data_map[i]->m_gpu.data_size = input_data_map[0]->m_gpu.data_size;
}
#ifdef _DEBUG
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
printf(" Split: %f ms \n", msecTotal1);
#endif
#ifdef _DEBUG
int dwSize2 = top_data_size[0].data_dim[2] * top_data_size[0].data_dim[3] * top_data_size[0].data_dim[1];
float *pfDataOut = new float[dwSize2];
cudaMemcpy(pfDataOut, output_data_map[0]->m_gpu.pfData_gpu, dwSize2 * sizeof(float), cudaMemcpyDeviceToHost);
delete[] pfDataOut;
cudaDeviceSynchronize();
printf("Split:%s\n", cudaGetErrorString(cudaGetLastError()));
#endif
return CUDA_RETURN_VALUE;
}
|
59fcb4d13d126e7ffaf107239947c5e3f5192e33.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include "helper_cuda.h"
#define BLOCK_DIM 16
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on GPU
//! C = alpha * A * B + beta * C
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param C matrix C as provided to device
//! @param N height of matrix A and matrix C
//! @param M width of matrix B and matrix C
//! @param K width of matrix A and height of matrix C
//! @param alpha scala value for matrix multiplication
//! @param beta scala value for matrix summation with C
////////////////////////////////////////////////////////////////////////////////
__global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
row += 1;
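    // note: this offset shifts every thread down one row, so the last block row
    // reads and writes one row past the end of A and C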
float sum = 0.f;
for (int i = 0; i < K; ++i)
sum += A[row * K + i] * B[i * K + col];
C[row * M + col] = alpha * sum + beta * C[row * M + col];
}
void random_init(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
}
int main()
{
float *A, *B, *C;
float *d_A, *d_B, *d_C;
int N, M, K;
float alpha = 2.f;
float beta = 1.f;
int n_iter = 1;
N = M = K = 2048;
// allocation of linear memory space
A = (float *)malloc(N * K * sizeof(float));
B = (float *)malloc(K * M * sizeof(float));
C = (float *)malloc(N * M * sizeof(float));
// allocation of gpu linear memory space
checkCudaErrors(hipMalloc((void **)&d_A, N * K * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_B, K * M * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_C, N * M * sizeof(float)));
// initialize randomized values for memory space
random_init(A, N * K);
random_init(B, K * M);
random_init(C, N * M);
// copy initial value for gpu memory
checkCudaErrors(hipMemcpy(d_A, A, N * K * sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_B, B, K * M * sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_C, C, N * M * sizeof(float), hipMemcpyHostToDevice));
// do operation
for (int i = 0; i < n_iter; i++) {
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
dim3 dimGrid(M / dimBlock.x, N / dimBlock.y);
hipLaunchKernelGGL(( sgemm_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N, M, K, alpha, beta);
checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipDeviceSynchronize());
printf("Application finished successfully.");
// terminates allocated gpu memory space
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
// terminates allocated memory space
free(A);
free(B);
free(C);
return 0;
}
| 59fcb4d13d126e7ffaf107239947c5e3f5192e33.cu | #include <stdio.h>
#include <cuda_profiler_api.h>
#include "helper_cuda.h"
#define BLOCK_DIM 16
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on GPU
//! C = alpha * A * B + beta * C
//! @param A matrix A as provided to device
//! @param B matrix B as provided to device
//! @param C matrix C as provided to device
//! @param N height of matrix A and matrix C
//! @param M width of matrix B and matrix C
//! @param K width of matrix A and height of matrix C
//! @param alpha scala value for matrix multiplication
//! @param beta scala value for matrix summation with C
////////////////////////////////////////////////////////////////////////////////
__global__ void sgemm_kernel(const float *A, const float *B, float *C, int N, int M, int K, float alpha, float beta)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
row += 1;
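    // note: this offset shifts every thread down one row, so the last block row
    // reads and writes one row past the end of A and C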
float sum = 0.f;
for (int i = 0; i < K; ++i)
sum += A[row * K + i] * B[i * K + col];
C[row * M + col] = alpha * sum + beta * C[row * M + col];
}
void random_init(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (rand() & 0xFF) / (float)RAND_MAX;
}
}
int main()
{
float *A, *B, *C;
float *d_A, *d_B, *d_C;
int N, M, K;
float alpha = 2.f;
float beta = 1.f;
int n_iter = 1;
N = M = K = 2048;
// allocation of linear memory space
A = (float *)malloc(N * K * sizeof(float));
B = (float *)malloc(K * M * sizeof(float));
C = (float *)malloc(N * M * sizeof(float));
// allocation of gpu linear memory space
checkCudaErrors(cudaMalloc((void **)&d_A, N * K * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_B, K * M * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_C, N * M * sizeof(float)));
// initialize randomized values for memory space
random_init(A, N * K);
random_init(B, K * M);
random_init(C, N * M);
// copy initial value for gpu memory
checkCudaErrors(cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_B, B, K * M * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_C, C, N * M * sizeof(float), cudaMemcpyHostToDevice));
// do operation
for (int i = 0; i < n_iter; i++) {
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
dim3 dimGrid(M / dimBlock.x, N / dimBlock.y);
sgemm_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, M, K, alpha, beta);
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaDeviceSynchronize());
printf("Application finished successfully.");
// terminates allocated gpu memory space
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
// terminates allocated memory space
free(A);
free(B);
free(C);
return 0;
}
|
424771392f6c1179c207c3a1b559d7e06940e4b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
int64_t nOutputPlane)
{
THCTensor_(resizeAs)(state, output, input);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
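  // nOutputPlane == 0 means a single weight shared by all elements; otherwise one
  // weight per channel, broadcast over mapSize spatial positions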
if (nOutputPlane == 0)
{
THC_pointwiseApply2(state, output, input, PReLUUpdateOutput<real>(w));
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (input->size[ndim > 1] != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[ndim > 1]);
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size[d];
}
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluForward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int64_t nOutputPlane)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w));
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (input->size[ndim > 1] != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[ndim > 1]);
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size[d];
}
int nElemsPerSample = nOutputPlane * mapSize;
hipLaunchKernelGGL(( preluBackward), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(hipGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
THCTensor *gradWeightBuf,
THCTensor *gradWeightBuf2,
int64_t nOutputPlane,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>());
// introduces a sync point
real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput));
real w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale));
}
else
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale));
THCTensor *sumbuf = gradWeightBuf2;
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size[d];
}
THCTensor_(resize3d)(state, buffer, input->size[0], nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size[0], nOutputPlane);
THCTensor_(sum)(state, sumbuf, buffer, 2, 1);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
}
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
}
}
#endif
| 424771392f6c1179c207c3a1b559d7e06940e4b2.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/PReLU.cu"
#else
void THNN_(PReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
int64_t nOutputPlane)
{
THCTensor_(resizeAs)(state, output, input);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply2(state, output, input, PReLUUpdateOutput<real>(w));
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
input = THCTensor_(newContiguous)(state, input);
int n = THCTensor_(nElement)(state, input);
if (input->size[ndim > 1] != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[ndim > 1]);
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size[d];
}
int nElemsPerSample = nOutputPlane * mapSize;
preluForward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, output),
THCTensor_(data)(state, input),
w,
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int64_t nOutputPlane)
{
THCUNN_check_nElement(state, input, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
weight = THCTensor_(newContiguous)(state, weight);
real *w = THCTensor_(data)(state, weight);
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, gradOutput, input, PReLUUpdateGradInput<real>(w));
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
input = THCTensor_(newContiguous)(state, input);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
int n = THCTensor_(nElement)(state, input);
if (input->size[ndim > 1] != nOutputPlane)
THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[ndim > 1]);
int mapSize = 1;
for (int d = 2; d < ndim; d++) {
mapSize *= input->size[d];
}
int nElemsPerSample = nOutputPlane * mapSize;
preluBackward<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, gradInput),
THCTensor_(data)(state, input),
w,
THCTensor_(data)(state, gradOutput),
n, nElemsPerSample, mapSize
);
THCudaCheck(cudaGetLastError());
THCTensor_(free)(state, input);
THCTensor_(free)(state, gradOutput);
}
THCTensor_(free)(state, weight);
}
void THNN_(PReLU_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
THCTensor *gradWeight,
THCTensor *gradWeightBuf,
THCTensor *gradWeightBuf2,
int64_t nOutputPlane,
accreal scale_)
{
real scale = ScalarConvert<accreal, real>::to(scale_);
THCUNN_check_nElement(state, input, gradOutput);
// use grad input for temporary storage, then call updateGradInput again
if (nOutputPlane == 0)
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParametersShared<real>());
// introduces a sync point
real sum = ScalarConvert<accreal, real>::to(THCTensor_(sumall)(state, gradInput));
real w = THCTensor_(get1d)(state, gradWeight, 0);
THCTensor_(set1d)(state, gradWeight, 0, w + sum * scale);
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
else
{
int ndim = THCTensor_(nDimension)(state, input);
if (ndim == 1)
{
THC_pointwiseApply3(state, gradWeight, input, gradOutput, PReLUAccGradParameters1to1<real>(scale));
}
else
{
THC_pointwiseApply3(state, gradInput, input, gradOutput, PReLUAccGradParameters<real>(scale));
THCTensor *sumbuf = gradWeightBuf2;
THCTensor_(resizeAs)(state, gradWeightBuf, gradWeight);
if (ndim == 2)
{
THCTensor_(sum)(state, gradWeightBuf, gradInput, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
}
else
{
THCTensor *buffer = THCTensor_(newContiguous)(state, gradInput);
int64_t size3 = 1;
for (int d = 2; d < ndim; d++) {
size3 *= input->size[d];
}
THCTensor_(resize3d)(state, buffer, input->size[0], nOutputPlane, size3);
THCTensor_(resize2d)(state, sumbuf, input->size[0], nOutputPlane);
THCTensor_(sum)(state, sumbuf, buffer, 2, 1);
THCTensor_(sum)(state, gradWeightBuf, sumbuf, 0, 1);
THCTensor_(cadd)(state, gradWeight, gradWeight, scale, gradWeightBuf);
THCTensor_(free)(state, buffer);
}
// restore gradInput
THNN_(PReLU_updateGradInput)(state, input, gradOutput, gradInput, weight, nOutputPlane);
}
}
}
#endif
|
8c37ab6bda3d0f825e24c1183b993d8001bbefc1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
void run_cpu_gray_test(PGM_IMG img_in, char *out_filename);
int main(int argc, char *argv[]){
PGM_IMG img_ibuf_g;
if (argc != 3) {
printf("Run with input file name and output file name as arguments\n");
exit(1);
}
printf("Running contrast enhancement for gray-scale images.\n");
img_ibuf_g = read_pgm(argv[1]);
run_cpu_gray_test(img_ibuf_g, argv[2]);
free_pgm(img_ibuf_g);
return 0;
}
void run_cpu_gray_test(PGM_IMG img_in, char *out_filename)
{
unsigned int timer = 0;
PGM_IMG img_obuf;
float gpu_time;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Starting GPU processing...\n");
hipEventRecord(start,0);
img_obuf = contrast_enhancement_g(img_in);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpu_time,start,stop);
printf("GPU time used in seconds is %f\n", gpu_time );
write_pgm(img_obuf, out_filename);
free_pgm(img_obuf);
}
PGM_IMG read_pgm(const char * path){
FILE * in_file;
char sbuf[256];
PGM_IMG result;
int v_max;//, i;
in_file = fopen(path, "r");
if (in_file == NULL){
printf("Input file not found!\n");
exit(1);
}
fscanf(in_file, "%s", sbuf); /*Skip the magic number*/
fscanf(in_file, "%d",&result.w);
fscanf(in_file, "%d",&result.h);
fscanf(in_file, "%d\n",&v_max);
printf("Image size: %d x %d\n", result.w, result.h);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
fread(result.img,sizeof(unsigned char), result.w*result.h, in_file);
fclose(in_file);
return result;
}
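/*
 * Example of the binary PGM (P5) header this reader expects (illustrative):
 *
 * P5
 * 640 480
 * 255
 * <640*480 bytes of raw 8-bit pixel data>
 *
 * The reader assumes maxval <= 255, i.e. one byte per pixel.
 */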
void write_pgm(PGM_IMG img, const char * path){
FILE * out_file;
out_file = fopen(path, "wb");
fprintf(out_file, "P5\n");
fprintf(out_file, "%d %d\n255\n",img.w, img.h);
fwrite(img.img,sizeof(unsigned char), img.w*img.h, out_file);
fclose(out_file);
}
void free_pgm(PGM_IMG img)
{
free(img.img);
}
| 8c37ab6bda3d0f825e24c1183b993d8001bbefc1.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "hist-equ.h"
void run_cpu_gray_test(PGM_IMG img_in, char *out_filename);
int main(int argc, char *argv[]){
PGM_IMG img_ibuf_g;
if (argc != 3) {
printf("Run with input file name and output file name as arguments\n");
exit(1);
}
printf("Running contrast enhancement for gray-scale images.\n");
img_ibuf_g = read_pgm(argv[1]);
run_cpu_gray_test(img_ibuf_g, argv[2]);
free_pgm(img_ibuf_g);
return 0;
}
void run_cpu_gray_test(PGM_IMG img_in, char *out_filename)
{
unsigned int timer = 0;
PGM_IMG img_obuf;
float gpu_time;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Starting GPU processing...\n");
cudaEventRecord(start,0);
img_obuf = contrast_enhancement_g(img_in);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time,start,stop);
printf("GPU time used in seconds is %f\n", gpu_time );
write_pgm(img_obuf, out_filename);
free_pgm(img_obuf);
}
PGM_IMG read_pgm(const char * path){
FILE * in_file;
char sbuf[256];
PGM_IMG result;
int v_max;//, i;
in_file = fopen(path, "r");
if (in_file == NULL){
printf("Input file not found!\n");
exit(1);
}
fscanf(in_file, "%s", sbuf); /*Skip the magic number*/
fscanf(in_file, "%d",&result.w);
fscanf(in_file, "%d",&result.h);
fscanf(in_file, "%d\n",&v_max);
printf("Image size: %d x %d\n", result.w, result.h);
result.img = (unsigned char *)malloc(result.w * result.h * sizeof(unsigned char));
fread(result.img,sizeof(unsigned char), result.w*result.h, in_file);
fclose(in_file);
return result;
}
void write_pgm(PGM_IMG img, const char * path){
FILE * out_file;
out_file = fopen(path, "wb");
fprintf(out_file, "P5\n");
fprintf(out_file, "%d %d\n255\n",img.w, img.h);
fwrite(img.img,sizeof(unsigned char), img.w*img.h, out_file);
fclose(out_file);
}
void free_pgm(PGM_IMG img)
{
free(img.img);
}
|
9edaf52b8daa77c81b8562db31193da42965a14a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define DIM 3
#define GRID 16
#define VALIDATE 10
// function declarations
void validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D);
void validate_search (const float *q, const float *c, const int *closest, int N,
int D);
void write_file (double time_var, const char *filename, const char *mode);
/**
* Find grid location of each block with gpu
* @method find_grid_loc_gpu
* @param points points matrix
* @param grid_loc grid location for each point result
* @param n num of elements
* @param d grid dimension (cube)
*/
__global__ void
find_grid_loc_gpu (float *points, int *grid_loc, int n, int d, int k)
{
// int thread_id = blockIdx.x*blockDim.x + threadIdx.x;
int x, y, z;
int dd = d * d;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
{
x = (int) (points[i * DIM + 0] * d);
y = (int) (points[i * DIM + 1] * d);
z = (int) (points[i * DIM + 2] * d);
grid_loc[i] = x + d * y + dd * z;
}
}
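/*
 * Worked example for the cell index above (illustrative values): with d = 16
 * and a point at (0.53, 0.27, 0.81),
 * x = (int)(0.53*16) = 8, y = (int)(0.27*16) = 4, z = (int)(0.81*16) = 12,
 * grid_loc = 8 + 16*4 + 256*12 = 3144.
 * Coordinates are assumed to lie in [0, 1), which init_rand_points guarantees.
 */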
/**
* [search_block_gpu description]
* @method search_block_gpu
* @param closest current closest point index
* @param current_min current closest point distance
* @param block_offset block location in point array
* @param q point
* @param block block point array
* @param points_in_block num of points the current block
*/
__device__ void
search_block_gpu (int *closest, float *current_min, int block_offset, float *q,
float *block, int points_in_block)
{
float dist;
for (int i = 0; i < points_in_block; i++)
{
dist = (block[i * DIM + 0] - q[0]) * (block[i * DIM + 0] - q[0]);
dist += (block[i * DIM + 1] - q[1]) * (block[i * DIM + 1] - q[1]);
dist += (block[i * DIM + 2] - q[2]) * (block[i * DIM + 2] - q[2]);
dist = sqrtf (dist);
if (dist < *current_min)
{
*current_min = dist;
*closest = i + block_offset;
}
}
}
/**
* find closest point in c of each point in q with gpu
* @method void search_gpu
* @param q [q points]
* @param c [c points]
* @param grid [c points location of each grid block]
* @param points_per_block [points in each grid block]
* @param closests [result array index]
* @param mindists [result array min dist found]
* @param N [number of elements]
* @param d [grid dimension cube]
*/
__global__ void
search_gpu (float *q, float *c, int *grid, int *points_per_block, int *closests,
float *mindists, int n, int d, int dd)
{
// int thread_id = blockIdx.x*blockDim.x + threadIdx.x;
int x, y, z;
int grid_loc;
float b;
int stage = 0, finished = 0;
// int start = thread_id * points_per_thread ;
// int end = thread_id * points_per_thread + points_per_thread ;
float block_size = 1 / (float) d;
float d_mindist;
float point[3];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
{
point[0] = q[i * DIM + 0];
point[1] = q[i * DIM + 1];
point[2] = q[i * DIM + 2];
x = (int) (point[0] * d);
y = (int) (point[1] * d);
z = (int) (point[2] * d);
grid_loc = x + d * y + dd * z;
mindists[i] = (1 << 10); // Inf
search_block_gpu (&closests[i], &mindists[i], grid[grid_loc], &q[i * DIM],
&c[grid[grid_loc] * DIM], points_per_block[grid_loc]);
finished = 0;
stage = 0;
while (!finished)
{
finished = 1;
//-------X---------
if (x + stage + 1 < d)
{
b = block_size * (x + stage + 1) - point[0];
if (b < mindists[i])
finished = 0;
}
if (x - stage - 1 > -1)
{
b = point[0] - block_size * (x - stage);
if (b < mindists[i])
finished = 0;
}
//-------Y---------
if (y + stage + 1 < d)
{
b = block_size * (y + stage + 1) - point[1];
if (b < mindists[i])
finished = 0;
}
if (y - stage - 1 > -1)
{
b = point[1] - block_size * (y - stage);
if (b < mindists[i])
finished = 0;
}
//-------Z---------
if (z + stage + 1 < d)
{
b = block_size * (z + stage + 1) - point[2];
if (b < mindists[i])
finished = 0;
}
if (z - stage - 1 > -1)
{
b = point[2] - block_size * (z - stage);
if (b < mindists[i])
finished = 0;
}
stage++;
if (!finished)
{
for (int dx = x - stage; dx <= x + stage; dx++)
{
for (int dy = y - stage; dy <= y + stage; dy++)
{
for (int dz = z - stage; dz <= z + stage; dz++)
{
if (dx == x - stage || dx == x + stage
|| dy == y - stage || dy == y + stage
|| dz == z - stage || dz == z + stage)
{
grid_loc = dx + d * dy + dd * dz;
if (dx > -1 && dx < d && dy > -1 && dy < d
&& dz > -1 && dz < d)
search_block_gpu (&closests[i], &mindists[i],
grid[grid_loc], point,
&c[grid[grid_loc] * DIM],
points_per_block[grid_loc]);
}
}
}
}
}
}
}
}
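/*
 * Termination criterion of the ring expansion above: the while loop stops once
 * the distance from the query point to every face of the next unexplored shell
 * of cells already exceeds the current best distance, so no closer point can
 * exist outside the cells visited so far. Each iteration only scans the outer
 * shell (the dx/dy/dz == +/- stage condition), never the interior cells that
 * were already searched.
 */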
/**
* Find grid location of each block
* @method void find_grid_loc
* @param points points matrix
* @param grid_loc grid location for each point result
* @param n num of elements
* @param d grid dimension (cube)
*/
void
find_grid_loc (float *points, int *grid_loc, int n, int d)
{
int x, y, z;
int dd = d * d;
for (int i = 0; i < n; i++)
{
x = (int) (points[i * DIM + 0] * d);
y = (int) (points[i * DIM + 1] * d);
z = (int) (points[i * DIM + 2] * d);
grid_loc[i] = x + d * y + dd * z;
}
}
void
init_rand_points (float *p, int n)
{
int i;
for (i = 0; i < n * DIM; i++)
p[i] = (rand () % 1000000 / (float) (1000001));
}
/**
* [search_block description]
* @method search_block
* @param closest current closest point index
* @param current_min current closest point distance
* @param block_offset block location in point array
* @param q point
* @param block block point array
* @param points_in_block num of points the current block
*/
void
search_block (int *closest, float *current_min, int block_offset, float *q,
float *block, int points_in_block)
{
float dist;
for (int i = 0; i < points_in_block; i++)
{
dist = (block[i * DIM + 0] - q[0]) * (block[i * DIM + 0] - q[0]);
dist += (block[i * DIM + 1] - q[1]) * (block[i * DIM + 1] - q[1]);
dist += (block[i * DIM + 2] - q[2]) * (block[i * DIM + 2] - q[2]);
dist = sqrtf (dist);
if (dist < *current_min)
{
*current_min = dist;
*closest = i + block_offset;
}
}
}
/**
* find closest point in c of each point in q with cpu
* @method void search
* @param q [q points]
* @param c [c points]
* @param grid [c points location of each grid block]
* @param points_per_block [points in each grid block]
* @param closests [result array index]
* @param mindists [result array min dist found]
* @param N [number of elements]
* @param d [grid dimension cube]
*/
void
search (float *q, float *c, int *grid, int *points_per_block, int *closests,
float *mindists, int N, int d)
{
int x, y, z;
int grid_loc;
float b;
int stage = 0, finished = 0;
float block_size = 1 / (float) d;
float point[3];
for (int i = 0; i < N; i++)
{
/*if(i % 100 == 0){
float per = (float) i / (float) N * 100;
printf("%f\n",per );
} */
point[0] = q[i * DIM + 0];
point[1] = q[i * DIM + 1];
point[2] = q[i * DIM + 2];
x = (int) (point[0] * (float) d);
y = (int) (point[1] * (float) d);
z = (int) (point[2] * (float) d);
grid_loc = x + d * y + d * d * z;
mindists[i] = (1 << 10); // Inf
search_block (&closests[i], &mindists[i], grid[grid_loc], point,
&c[grid[grid_loc] * DIM], points_per_block[grid_loc]);
finished = 0;
stage = 0;
while (!finished)
{
// if(stage > 2 ) printf("%d\n",stage );
finished = 1;
//-------X---------
if (x + stage + 1 < d)
{
b = block_size * (x + stage + 1) - point[0];
if (b < mindists[i])
finished = 0;
}
if (x - stage - 1 > -1)
{
b = point[0] - block_size * (x - stage);
if (b < mindists[i])
finished = 0;
}
//-------Y---------
if (y + stage + 1 < d)
{
b = block_size * (y + stage + 1) - point[1];
if (b < mindists[i])
finished = 0;
}
if (y - stage - 1 > -1)
{
b = point[1] - block_size * (y - stage);
if (b < mindists[i])
finished = 0;
}
//-------Z---------
if (z + stage + 1 < d)
{
b = block_size * (z + stage + 1) - point[2];
if (b < mindists[i])
finished = 0;
}
if (z - stage - 1 > -1)
{
b = point[2] - block_size * (z - stage);
if (b < mindists[i])
finished = 0;
}
stage++;
// if (stage == 1)
// finished = 0;
if (!finished)
{
for (int dx = x - stage; dx <= x + stage; dx++)
{
for (int dy = y - stage; dy <= y + stage; dy++)
{
for (int dz = z - stage; dz <= z + stage; dz++)
{
if (dx == x - stage || dx == x + stage
|| dy == y - stage || dy == y + stage
|| dz == z - stage || dz == z + stage)
{
grid_loc = dx + d * dy + d * d * dz;
if (dx > -1 && dx < d && dy > -1 && dy < d
&& dz > -1 && dz < d)
search_block (&closests[i], &mindists[i],
grid[grid_loc], point,
&c[grid[grid_loc] * DIM],
points_per_block[grid_loc]);
}
}
}
}
}
}
}
}
float *
rearrange (float *p, int *intex, int *points_per_block, int *grid, int n, int k)
{
for (int i = 0; i < k; i++)
points_per_block[i] = 0;
for (int i = 0; i < n; i++)
{
points_per_block[intex[i]]++;
}
grid[0] = 0;
grid[1] = points_per_block[0];
for (int i = 2; i < k; i++)
{
grid[i] = grid[i - 1] + points_per_block[i - 1];
}
int *positions = (int *) malloc (k * sizeof (int));
for (int i = 0; i < k; i++)
{
positions[i] = grid[i];
}
float *arrangedpoints = (float *) malloc (n * DIM * sizeof (float));
int pos;
int posDim = 0, iDim = 0;
for (int i = 0; i < n; i++)
{
pos = positions[intex[i]];
posDim = pos * DIM;
arrangedpoints[posDim + 0] = p[iDim + 0];
arrangedpoints[posDim + 1] = p[iDim + 1];
arrangedpoints[posDim + 2] = p[iDim + 2];
iDim = iDim + DIM;
positions[intex[i]]++;
}
// free(p);
return arrangedpoints;
}
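/*
 * Illustrative example of what rearrange() produces: grid[] is the exclusive
 * prefix sum of points_per_block[], i.e. the starting offset of each cell in
 * the reordered point array (a CSR-style layout). For 4 cells with counts
 * {3, 1, 0, 2} the offsets become {0, 3, 4, 4}, and the 6 points end up stored
 * contiguously per cell. Note that the original array p is not freed here; the
 * caller keeps ownership of both buffers.
 */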
int
main (int argc, char **argv)
{
if (argc != 3)
{
printf ("Invalid argument\n");
exit (1);
}
int NQ = 1 << atoi (argv[1]);
int NC = 1 << atoi (argv[1]);
int N = NQ;
int D = 1 << atoi (argv[2]);
write_file (atoi (argv[1]), "problem_size.data", "a");
write_file (atoi (argv[2]), "grid_size.data", "a");
int block_num = D * D * D;
printf ("NQ=%d NC=%d D=%d block_num=%d\n", NQ, NC, D, block_num);
float *intervals = (float *) malloc (D * sizeof (float));
for (int i = 1; i <= D; i++)
intervals[i - 1] = 1 / (float) D * i;
struct timeval startwtime, endwtime;
double elapsed_time;
float *q, *c;
int *grid_q, *grid_c;
int *q_block, *c_block;
int *points_block_q, *points_block_c;
int *closest;
float *mindists;
// malloc points
q = (float *) malloc (N * DIM * sizeof (float));
c = (float *) malloc (N * DIM * sizeof (float));
// malloc location of grid block in array q/c
grid_q = (int *) malloc (block_num * sizeof (int));
grid_c = (int *) malloc (block_num * sizeof (int));
// malloc grid of each point
q_block = (int *) malloc (N * sizeof (int));
c_block = (int *) malloc (N * sizeof (int));
// malloc points per block
points_block_q = (int *) malloc (block_num * sizeof (int));
points_block_c = (int *) malloc (block_num * sizeof (int));
closest = (int *) malloc (N * sizeof (int));
mindists = (float *) malloc (N * sizeof (float));
init_rand_points (q, N);
init_rand_points (c, N);
// find_grid_loc(q,q_block,N,D);
// find_grid_loc(c,c_block,N,D);
int blocks = 1280;
int threads_pblock = 64;
int threads = blocks * threads_pblock;
int k = N / threads;
printf ("the k: %d\n", k);
float *d_q, *d_c, *d_mindists;
int *d_q_block, *d_c_block, *points_per_block, *block_loc, *d_closests;
hipMalloc (&d_q, N * DIM * sizeof (float));
hipMalloc (&d_c, N * DIM * sizeof (float));
hipMalloc (&d_q_block, N * sizeof (float));
hipMalloc (&d_c_block, N * sizeof (float));
hipMalloc (&points_per_block, block_num * sizeof (float));
hipMalloc (&block_loc, block_num * sizeof (float));
hipMalloc (&d_closests, N * sizeof (float));
hipMalloc (&d_mindists, N * sizeof (float));
//-------------ASSIGN POINTS TO GRID IN GPU-----------------------
hipMemcpy (d_q, q, N * DIM * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy (d_c, c, N * DIM * sizeof (float), hipMemcpyHostToDevice);
gettimeofday (&startwtime, NULL);
hipLaunchKernelGGL(( find_grid_loc_gpu), dim3(blocks), dim3(threads_pblock), 0, 0, d_q, d_q_block, N, D, k);
hipLaunchKernelGGL(( find_grid_loc_gpu), dim3(blocks), dim3(threads_pblock), 0, 0, d_c, d_c_block, N, D, k);
hipDeviceSynchronize ();
gettimeofday (&endwtime, NULL);
hipMemcpy (q_block, d_q_block, N * sizeof (int), hipMemcpyDeviceToHost);
hipMemcpy (c_block, d_c_block, N * sizeof (int), hipMemcpyDeviceToHost);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Time : %f\n", elapsed_time);
//-------------REARRANGE POINTS IN GRID IN CPU-----------------------
gettimeofday (&startwtime, NULL);
// q=rearrange(q,q_block,points_block_q,grid_q,N,block_num);
c = rearrange (c, c_block, points_block_c, grid_c, N, block_num);
hipMemcpy (d_c, c, N * DIM * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy (points_per_block, points_block_c, block_num * sizeof (int),
hipMemcpyHostToDevice);
hipMemcpy (block_loc, grid_c, block_num * sizeof (int),
hipMemcpyHostToDevice);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Time : %f\n", elapsed_time);
write_file (elapsed_time, "rearrange_time.data", "a");
//---------------GRID VALIDATION IN CPU-----------------------
validate_grid (c, intervals, grid_c, points_block_c, D);
//-------------SEARCH GRID IN GPU-----------------------
gettimeofday (&startwtime, NULL);
hipLaunchKernelGGL(( search_gpu), dim3(blocks), dim3(threads_pblock), 0, 0, d_q, d_c, block_loc, points_per_block,
d_closests, d_mindists, N, D, D * D);
hipDeviceSynchronize ();
hipMemcpy (closest, d_closests, N * sizeof (int), hipMemcpyDeviceToHost);
hipMemcpy (mindists, d_mindists, N * sizeof (int), hipMemcpyDeviceToHost);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
validate_search (q, c, closest, N, D);
printf ("Search Time GPU: %f\n", elapsed_time);
write_file (elapsed_time, "search_gpu_time.data", "a");
//---------------SEARCH GRID IN CPU-----------------------
gettimeofday (&startwtime, NULL);
// search(q,c,grid_c,points_block_c ,closest,mindists ,N, D);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Search Time CPU : %f\n", elapsed_time);
write_file (elapsed_time, "search_cpu_time.data", "a");
//-----------------------------------VALIDATE SEARCH IN CPU-----------------------
//---------------CLEAN UP-------------------------------------
hipFree (d_q_block);
hipFree (d_c_block);
hipFree (d_q);
hipFree (d_c);
}
void
validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D)
{
int sum = 0;
int fails = 0;
float xmax, ymax, zmax, xmin, ymin, zmin;
int pos, block_pos, point_pos;
for (int x = 0; x < D; x++)
{
xmax = intervals[x];
if (x == 0)
{
xmin = 0;
}
else
{
xmin = intervals[x - 1];
}
for (int y = 0; y < D; y++)
{
ymax = intervals[y];
if (x == 0)
{
ymin = 0;
}
else
{
ymin = intervals[y - 1];
}
for (int z = 0; z < D; z++)
{
zmax = intervals[z];
if (x == 0)
{
zmin = 0;
}
else
{
zmin = intervals[z - 1];
}
pos = x + D * y + D * D * z;
block_pos = grid_c[pos];
for (int point = 0; point < points_block_c[pos]; point++)
{
sum++;
if (c[(block_pos + point) * DIM + 0] >= xmax
|| c[(block_pos + point) * DIM + 0] < xmin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
if (c[(block_pos + point) * DIM + 1] >= ymax
|| c[(block_pos + point) * DIM + 1] < ymin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
if (c[(block_pos + point) * DIM + 2] >= zmax
|| c[(block_pos + point) * DIM + 2] < zmin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
}
}
}
}
printf ("GRID VALIDATION POINTS:%d FAILS:%d\n", sum, fails);
}
void
validate_search (const float *q, const float *c, const int *closest, int N,
int D)
{
float mindist, dist;
int close;
int fails = 0;
for (int i = 0; i < VALIDATE; i++)
{
mindist = (1 << 10);
for (int j = 0; j < N; j++)
{
dist = (q[i * DIM + 0] - c[j * DIM + 0])
* (q[i * DIM + 0] - c[j * DIM + 0]);
dist += (q[i * DIM + 1] - c[j * DIM + 1])
* (q[i * DIM + 1] - c[j * DIM + 1]);
dist += (q[i * DIM + 2] - c[j * DIM + 2])
* (q[i * DIM + 2] - c[j * DIM + 2]);
dist = sqrtf (dist);
if (dist < mindist)
{
close = j;
mindist = dist;
}
}
if (close != closest[i])
{
// printf ("intex %d %d dists %f %f q :%f %f %f c: %f %f %f\n",
// close,
// closest[i], mindist, mindists[i], q[i * DIM + 0],
// q[i * DIM + 1], q[i * DIM + 2], c[close * DIM + 0],
// c[close * DIM + 1], c[close * DIM + 2]);
int x, y, z;
x = (int) (q[i * DIM + 0] * D);
y = (int) (q[i * DIM + 1] * D);
z = (int) (q[i * DIM + 2] * D);
// printf ("q : %d %d %d ", x, y, z);
x = (int) (c[close * DIM + 0] * D);
y = (int) (c[close * DIM + 1] * D);
z = (int) (c[close * DIM + 2] * D);
// printf ("c: %d %d %d \n", x, y, z);
fails++;
}
}
float failrate = fails / (float) VALIDATE * 100;
printf ("SEARCH VALIDATION POINTS: %d FAILS: %d\n", VALIDATE, fails);
}
void
write_file (double time_var, const char *filename, const char *mode)
{
FILE *fptr;
// open the file
char filepath[64] = "output_data/";
strcat (filepath, filename);
fptr = fopen (filepath, mode);
if (!fptr)
{
printf ("Error: Can't open file %s", filepath);
return;
}
// print the time in file
fprintf (fptr, "%lf ", time_var);
// close file
fclose (fptr);
return;
}
| 9edaf52b8daa77c81b8562db31193da42965a14a.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define DIM 3
#define GRID 16
#define VALIDATE 10
// function declarations
void validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D);
void validate_search (const float *q, const float *c, const int *closest, int N,
int D);
void write_file (double time_var, const char *filename, const char *mode);
/**
* Find grid location of each block with gpu
* @method find_grid_loc_gpu
* @param points points matrix
* @param grid_loc grid location for each point result
* @param n num of elements
* @param d grid dimension (cube)
*/
__global__ void
find_grid_loc_gpu (float *points, int *grid_loc, int n, int d, int k)
{
// int thread_id = blockIdx.x*blockDim.x + threadIdx.x;
int x, y, z;
int dd = d * d;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
{
x = (int) (points[i * DIM + 0] * d);
y = (int) (points[i * DIM + 1] * d);
z = (int) (points[i * DIM + 2] * d);
grid_loc[i] = x + d * y + dd * z;
}
}
/**
* [search_block_gpu description]
* @method search_block_gpu
* @param closest current closest point index
* @param current_min current closest point distance
* @param block_offset block location in point array
* @param q point
* @param block block point array
* @param points_in_block num of points the current block
*/
__device__ void
search_block_gpu (int *closest, float *current_min, int block_offset, float *q,
float *block, int points_in_block)
{
float dist;
for (int i = 0; i < points_in_block; i++)
{
dist = (block[i * DIM + 0] - q[0]) * (block[i * DIM + 0] - q[0]);
dist += (block[i * DIM + 1] - q[1]) * (block[i * DIM + 1] - q[1]);
dist += (block[i * DIM + 2] - q[2]) * (block[i * DIM + 2] - q[2]);
dist = sqrtf (dist);
if (dist < *current_min)
{
*current_min = dist;
*closest = i + block_offset;
}
}
}
/**
* find closest point in c of each point in q with gpu
* @method void search_gpu
* @param q [q points]
* @param c [c points]
* @param grid [c points location of each grid block]
* @param points_per_block [points in each grid block]
* @param closests [result array index]
* @param mindists [result array min dist found]
* @param N [number of elements]
* @param d [grid dimension cube]
*/
__global__ void
search_gpu (float *q, float *c, int *grid, int *points_per_block, int *closests,
float *mindists, int n, int d, int dd)
{
// int thread_id = blockIdx.x*blockDim.x + threadIdx.x;
int x, y, z;
int grid_loc;
float b;
int stage = 0, finished = 0;
// int start = thread_id * points_per_thread ;
// int end = thread_id * points_per_thread + points_per_thread ;
float block_size = 1 / (float) d;
float d_mindist;
float point[3];
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
i += blockDim.x * gridDim.x)
{
point[0] = q[i * DIM + 0];
point[1] = q[i * DIM + 1];
point[2] = q[i * DIM + 2];
x = (int) (point[0] * d);
y = (int) (point[1] * d);
z = (int) (point[2] * d);
grid_loc = x + d * y + dd * z;
mindists[i] = (1 << 10); // Inf
search_block_gpu (&closests[i], &mindists[i], grid[grid_loc], &q[i * DIM],
&c[grid[grid_loc] * DIM], points_per_block[grid_loc]);
finished = 0;
stage = 0;
while (!finished)
{
finished = 1;
//-------X---------
if (x + stage + 1 < d)
{
b = block_size * (x + stage + 1) - point[0];
if (b < mindists[i])
finished = 0;
}
if (x - stage - 1 > -1)
{
b = point[0] - block_size * (x - stage);
if (b < mindists[i])
finished = 0;
}
//-------Y---------
if (y + stage + 1 < d)
{
b = block_size * (y + stage + 1) - point[1];
if (b < mindists[i])
finished = 0;
}
if (y - stage - 1 > -1)
{
b = point[1] - block_size * (y - stage);
if (b < mindists[i])
finished = 0;
}
//-------Z---------
if (z + stage + 1 < d)
{
b = block_size * (z + stage + 1) - point[2];
if (b < mindists[i])
finished = 0;
}
if (z - stage - 1 > -1)
{
b = point[2] - block_size * (z - stage);
if (b < mindists[i])
finished = 0;
}
stage++;
if (!finished)
{
for (int dx = x - stage; dx <= x + stage; dx++)
{
for (int dy = y - stage; dy <= y + stage; dy++)
{
for (int dz = z - stage; dz <= z + stage; dz++)
{
if (dx == x - stage || dx == x + stage
|| dy == y - stage || dy == y + stage
|| dz == z - stage || dz == z + stage)
{
grid_loc = dx + d * dy + dd * dz;
if (dx > -1 && dx < d && dy > -1 && dy < d
&& dz > -1 && dz < d)
search_block_gpu (&closests[i], &mindists[i],
grid[grid_loc], point,
&c[grid[grid_loc] * DIM],
points_per_block[grid_loc]);
}
}
}
}
}
}
}
}
/**
* Find grid location of each block
* @method void find_grid_loc
* @param points points matrix
* @param grid_loc grid location for each point result
* @param n num of elements
* @param d grid dimension (cube)
*/
void
find_grid_loc (float *points, int *grid_loc, int n, int d)
{
int x, y, z;
int dd = d * d;
for (int i = 0; i < n; i++)
{
x = (int) (points[i * DIM + 0] * d);
y = (int) (points[i * DIM + 1] * d);
z = (int) (points[i * DIM + 2] * d);
grid_loc[i] = x + d * y + dd * z;
}
}
void
init_rand_points (float *p, int n)
{
int i;
for (i = 0; i < n * DIM; i++)
p[i] = (rand () % 1000000 / (float) (1000001));
}
/**
* [search_block description]
* @method search_block
* @param closest current closest point index
* @param current_min current closest point distance
* @param block_offset block location in point array
* @param q point
* @param block block point array
* @param points_in_block num of points the current block
*/
void
search_block (int *closest, float *current_min, int block_offset, float *q,
float *block, int points_in_block)
{
float dist;
for (int i = 0; i < points_in_block; i++)
{
dist = (block[i * DIM + 0] - q[0]) * (block[i * DIM + 0] - q[0]);
dist += (block[i * DIM + 1] - q[1]) * (block[i * DIM + 1] - q[1]);
dist += (block[i * DIM + 2] - q[2]) * (block[i * DIM + 2] - q[2]);
dist = sqrtf (dist);
if (dist < *current_min)
{
*current_min = dist;
*closest = i + block_offset;
}
}
}
/**
* find closest point in c of each point in q with cpu
* @method void search
* @param q [q points]
* @param c [c points]
* @param grid [c points location of each grid block]
* @param points_per_block [points in each grid block]
* @param closests [result array index]
* @param mindists [result array min dist found]
* @param N [number of elements]
* @param d [grid dimension cube]
*/
void
search (float *q, float *c, int *grid, int *points_per_block, int *closests,
float *mindists, int N, int d)
{
int x, y, z;
int grid_loc;
float b;
int stage = 0, finished = 0;
float block_size = 1 / (float) d;
float point[3];
for (int i = 0; i < N; i++)
{
/*if(i % 100 == 0){
float per = (float) i / (float) N * 100;
printf("%f\n",per );
} */
point[0] = q[i * DIM + 0];
point[1] = q[i * DIM + 1];
point[2] = q[i * DIM + 2];
x = (int) (point[0] * (float) d);
y = (int) (point[1] * (float) d);
z = (int) (point[2] * (float) d);
grid_loc = x + d * y + d * d * z;
mindists[i] = (1 << 10); // Inf
search_block (&closests[i], &mindists[i], grid[grid_loc], point,
&c[grid[grid_loc] * DIM], points_per_block[grid_loc]);
finished = 0;
stage = 0;
while (!finished)
{
// if(stage > 2 ) printf("%d\n",stage );
finished = 1;
//-------X---------
if (x + stage + 1 < d)
{
b = block_size * (x + stage + 1) - point[0];
if (b < mindists[i])
finished = 0;
}
if (x - stage - 1 > -1)
{
b = point[0] - block_size * (x - stage);
if (b < mindists[i])
finished = 0;
}
//-------Y---------
if (y + stage + 1 < d)
{
b = block_size * (y + stage + 1) - point[1];
if (b < mindists[i])
finished = 0;
}
if (y - stage - 1 > -1)
{
b = point[1] - block_size * (y - stage);
if (b < mindists[i])
finished = 0;
}
//-------Z---------
if (z + stage + 1 < d)
{
b = block_size * (z + stage + 1) - point[2];
if (b < mindists[i])
finished = 0;
}
if (z - stage - 1 > -1)
{
b = point[2] - block_size * (z - stage);
if (b < mindists[i])
finished = 0;
}
stage++;
// if (stage == 1)
// finished = 0;
if (!finished)
{
for (int dx = x - stage; dx <= x + stage; dx++)
{
for (int dy = y - stage; dy <= y + stage; dy++)
{
for (int dz = z - stage; dz <= z + stage; dz++)
{
if (dx == x - stage || dx == x + stage
|| dy == y - stage || dy == y + stage
|| dz == z - stage || dz == z + stage)
{
grid_loc = dx + d * dy + d * d * dz;
if (dx > -1 && dx < d && dy > -1 && dy < d
&& dz > -1 && dz < d)
search_block (&closests[i], &mindists[i],
grid[grid_loc], point,
&c[grid[grid_loc] * DIM],
points_per_block[grid_loc]);
}
}
}
}
}
}
}
}
float *
rearrange (float *p, int *intex, int *points_per_block, int *grid, int n, int k)
{
for (int i = 0; i < k; i++)
points_per_block[i] = 0;
for (int i = 0; i < n; i++)
{
points_per_block[intex[i]]++;
}
grid[0] = 0;
grid[1] = points_per_block[0];
for (int i = 2; i < k; i++)
{
grid[i] = grid[i - 1] + points_per_block[i - 1];
}
int *positions = (int *) malloc (k * sizeof (int));
for (int i = 0; i < k; i++)
{
positions[i] = grid[i];
}
float *arrangedpoints = (float *) malloc (n * DIM * sizeof (float));
int pos;
int posDim = 0, iDim = 0;
for (int i = 0; i < n; i++)
{
pos = positions[intex[i]];
posDim = pos * DIM;
arrangedpoints[posDim + 0] = p[iDim + 0];
arrangedpoints[posDim + 1] = p[iDim + 1];
arrangedpoints[posDim + 2] = p[iDim + 2];
iDim = iDim + DIM;
positions[intex[i]]++;
}
// free(p);
return arrangedpoints;
}
int
main (int argc, char **argv)
{
if (argc != 3)
{
printf ("Invalid argument\n");
exit (1);
}
int NQ = 1 << atoi (argv[1]);
int NC = 1 << atoi (argv[1]);
int N = NQ;
int D = 1 << atoi (argv[2]);
write_file (atoi (argv[1]), "problem_size.data", "a");
write_file (atoi (argv[2]), "grid_size.data", "a");
int block_num = D * D * D;
printf ("NQ=%d NC=%d D=%d block_num=%d\n", NQ, NC, D, block_num);
float *intervals = (float *) malloc (D * sizeof (float));
for (int i = 1; i <= D; i++)
intervals[i - 1] = 1 / (float) D * i;
struct timeval startwtime, endwtime;
double elapsed_time;
float *q, *c;
int *grid_q, *grid_c;
int *q_block, *c_block;
int *points_block_q, *points_block_c;
int *closest;
float *mindists;
// malloc points
q = (float *) malloc (N * DIM * sizeof (float));
c = (float *) malloc (N * DIM * sizeof (float));
// malloc location of grid block in array q/c
grid_q = (int *) malloc (block_num * sizeof (int));
grid_c = (int *) malloc (block_num * sizeof (int));
// malloc grid of each point
q_block = (int *) malloc (N * sizeof (int));
c_block = (int *) malloc (N * sizeof (int));
// malloc points per block
points_block_q = (int *) malloc (block_num * sizeof (int));
points_block_c = (int *) malloc (block_num * sizeof (int));
closest = (int *) malloc (N * sizeof (int));
mindists = (float *) malloc (N * sizeof (float));
init_rand_points (q, N);
init_rand_points (c, N);
// find_grid_loc(q,q_block,N,D);
// find_grid_loc(c,c_block,N,D);
int blocks = 1280;
int threads_pblock = 64;
int threads = blocks * threads_pblock;
int k = N / threads;
printf ("the k: %d\n", k);
float *d_q, *d_c, *d_mindists;
int *d_q_block, *d_c_block, *points_per_block, *block_loc, *d_closests;
cudaMalloc (&d_q, N * DIM * sizeof (float));
cudaMalloc (&d_c, N * DIM * sizeof (float));
cudaMalloc (&d_q_block, N * sizeof (float));
cudaMalloc (&d_c_block, N * sizeof (float));
cudaMalloc (&points_per_block, block_num * sizeof (float));
cudaMalloc (&block_loc, block_num * sizeof (float));
cudaMalloc (&d_closests, N * sizeof (float));
cudaMalloc (&d_mindists, N * sizeof (float));
//-------------ASSIGN POINTS TO GRID IN GPU-----------------------
cudaMemcpy (d_q, q, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy (d_c, c, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
gettimeofday (&startwtime, NULL);
find_grid_loc_gpu<<<blocks, threads_pblock>>> (d_q, d_q_block, N, D, k);
find_grid_loc_gpu<<<blocks, threads_pblock>>> (d_c, d_c_block, N, D, k);
cudaDeviceSynchronize ();
gettimeofday (&endwtime, NULL);
cudaMemcpy (q_block, d_q_block, N * sizeof (int), cudaMemcpyDeviceToHost);
cudaMemcpy (c_block, d_c_block, N * sizeof (int), cudaMemcpyDeviceToHost);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Time : %f\n", elapsed_time);
//-------------REARRANGE POINTS IN GRID IN CPU-----------------------
gettimeofday (&startwtime, NULL);
// q=rearrange(q,q_block,points_block_q,grid_q,N,block_num);
c = rearrange (c, c_block, points_block_c, grid_c, N, block_num);
cudaMemcpy (d_c, c, N * DIM * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy (points_per_block, points_block_c, block_num * sizeof (int),
cudaMemcpyHostToDevice);
cudaMemcpy (block_loc, grid_c, block_num * sizeof (int),
cudaMemcpyHostToDevice);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Time : %f\n", elapsed_time);
write_file (elapsed_time, "rearrange_time.data", "a");
//---------------GRID VALIDATION IN CPU-----------------------
validate_grid (c, intervals, grid_c, points_block_c, D);
//-------------SEARCH GRID IN GPU-----------------------
gettimeofday (&startwtime, NULL);
search_gpu<<<blocks, threads_pblock>>> (d_q, d_c, block_loc, points_per_block,
d_closests, d_mindists, N, D, D * D);
cudaDeviceSynchronize ();
cudaMemcpy (closest, d_closests, N * sizeof (int), cudaMemcpyDeviceToHost);
cudaMemcpy (mindists, d_mindists, N * sizeof (int), cudaMemcpyDeviceToHost);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
validate_search (q, c, closest, N, D);
printf ("Search Time GPU: %f\n", elapsed_time);
write_file (elapsed_time, "search_gpu_time.data", "a");
//---------------SEARCH GRID IN CPU-----------------------
gettimeofday (&startwtime, NULL);
// search(q,c,grid_c,points_block_c ,closest,mindists ,N, D);
gettimeofday (&endwtime, NULL);
elapsed_time = (double) ((endwtime.tv_usec - startwtime.tv_usec) / 1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
printf ("Search Time CPU : %f\n", elapsed_time);
write_file (elapsed_time, "search_cpu_time.data", "a");
//-----------------------------------VALIDATE SEARCH IN CPU-----------------------
//---------------CLEAN UP-------------------------------------
cudaFree (d_q_block);
cudaFree (d_c_block);
cudaFree (d_q);
cudaFree (d_c);
}
void
validate_grid (const float *c, const float *intervals, const int *grid_c,
const int *points_block_c, int D)
{
int sum = 0;
int fails = 0;
float xmax, ymax, zmax, xmin, ymin, zmin;
int pos, block_pos, point_pos;
for (int x = 0; x < D; x++)
{
xmax = intervals[x];
if (x == 0)
{
xmin = 0;
}
else
{
xmin = intervals[x - 1];
}
for (int y = 0; y < D; y++)
{
ymax = intervals[y];
if (x == 0)
{
ymin = 0;
}
else
{
ymin = intervals[y - 1];
}
for (int z = 0; z < D; z++)
{
zmax = intervals[z];
if (x == 0)
{
zmin = 0;
}
else
{
zmin = intervals[z - 1];
}
pos = x + D * y + D * D * z;
block_pos = grid_c[pos];
for (int point = 0; point < points_block_c[pos]; point++)
{
sum++;
if (c[(block_pos + point) * DIM + 0] >= xmax
|| c[(block_pos + point) * DIM + 0] < xmin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
if (c[(block_pos + point) * DIM + 1] >= ymax
|| c[(block_pos + point) * DIM + 1] < ymin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
if (c[(block_pos + point) * DIM + 2] >= zmax
|| c[(block_pos + point) * DIM + 2] < zmin)
{
fails++;
// printf("fail at %d \n", block_pos );
}
}
}
}
}
printf ("GRID VALIDATION POINTS:%d FAILS:%d\n", sum, fails);
}
void
validate_search (const float *q, const float *c, const int *closest, int N,
int D)
{
float mindist, dist;
int close;
int fails = 0;
for (int i = 0; i < VALIDATE; i++)
{
mindist = (1 << 10);
for (int j = 0; j < N; j++)
{
dist = (q[i * DIM + 0] - c[j * DIM + 0])
* (q[i * DIM + 0] - c[j * DIM + 0]);
dist += (q[i * DIM + 1] - c[j * DIM + 1])
* (q[i * DIM + 1] - c[j * DIM + 1]);
dist += (q[i * DIM + 2] - c[j * DIM + 2])
* (q[i * DIM + 2] - c[j * DIM + 2]);
dist = sqrtf (dist);
if (dist < mindist)
{
close = j;
mindist = dist;
}
}
if (close != closest[i])
{
// printf ("intex %d %d dists %f %f q :%f %f %f c: %f %f %f\n",
// close,
// closest[i], mindist, mindists[i], q[i * DIM + 0],
// q[i * DIM + 1], q[i * DIM + 2], c[close * DIM + 0],
// c[close * DIM + 1], c[close * DIM + 2]);
int x, y, z;
x = (int) (q[i * DIM + 0] * D);
y = (int) (q[i * DIM + 1] * D);
z = (int) (q[i * DIM + 2] * D);
// printf ("q : %d %d %d ", x, y, z);
x = (int) (c[close * DIM + 0] * D);
y = (int) (c[close * DIM + 1] * D);
z = (int) (c[close * DIM + 2] * D);
// printf ("c: %d %d %d \n", x, y, z);
fails++;
}
}
float failrate = fails / (float) VALIDATE * 100;
printf ("SEARCH VALIDATION POINTS: %d FAILS: %d\n", VALIDATE, fails);
}
void
write_file (double time_var, const char *filename, const char *mode)
{
FILE *fptr;
// open the file
char filepath[64] = "output_data/";
strcat (filepath, filename);
fptr = fopen (filepath, mode);
if (!fptr)
{
printf ("Error: Can't open file %s", filepath);
return;
}
// print the time in file
fprintf (fptr, "%lf ", time_var);
// close file
fclose (fptr);
return;
}
|
c10ae72df85f37738867ccaf16af138c199b41bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaclaw5_update_q_cuda2(int mbc, int mx, int my, int meqn, double dtdx, double dtdy, double* qold, double* fm, double* fp, double* gm, double* gp)
{
int ix = threadIdx.x + blockIdx.x*blockDim.x;
int iy = threadIdx.y + blockIdx.y*blockDim.y;
if (ix < mx && iy < my)
{
int x_stride = meqn;
int y_stride = (2*mbc + mx)*x_stride;
int I_q = (ix+mbc)*x_stride + (iy+mbc)*y_stride;
int mq;
for(mq = 0; mq < meqn; mq++)
{
int i = I_q+mq;
qold[i] = qold[i] - dtdx * (fm[i+x_stride] - fp[i])
- dtdy * (gm[i+y_stride] - gp[i]);
}
}
} | c10ae72df85f37738867ccaf16af138c199b41bf.cu | #include "includes.h"
__global__ void cudaclaw5_update_q_cuda2(int mbc, int mx, int my, int meqn, double dtdx, double dtdy, double* qold, double* fm, double* fp, double* gm, double* gp)
{
int ix = threadIdx.x + blockIdx.x*blockDim.x;
int iy = threadIdx.y + blockIdx.y*blockDim.y;
if (ix < mx && iy < my)
{
int x_stride = meqn;
int y_stride = (2*mbc + mx)*x_stride;
int I_q = (ix+mbc)*x_stride + (iy+mbc)*y_stride;
int mq;
for(mq = 0; mq < meqn; mq++)
{
int i = I_q+mq;
qold[i] = qold[i] - dtdx * (fm[i+x_stride] - fp[i])
- dtdy * (gm[i+y_stride] - gp[i]);
}
}
} |
7f0f874938352deb7fe90c782da32f829bbd4704.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <omp.h>
#include <thread>
#include <thrust\device_vector.h>
#include "mkl.h"
#include <nvtx3\roctracer/roctx.h>
#include <iostream>
using std::cout; using std::endl;
using std::chrono::duration_cast;
using std::chrono::milliseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
#include "simulation_gpu.h"
#include "scan_gpu.h"
#define FULL_MASK 0xffffffff
#define TILE_DIM 51
#define TIMEPOINTS 51
#define VOL_DIM 3
#define BLOCKSIZE 32
#define BLOCK_SIZE 64
#define WARPSIZE 32
#define MAX_BLOCK_SZ 256
#define BATCH_SZ 1000
//#define double_ACC 1
//#define EXPECTED_EXPOSURE_DEBUG1 1
#undef HJM_SDE_DEBUG
#define MC_RDM_DEBUG
#undef HJM_PATH_SIMULATION_DEBUG
#undef HJM_NUMERAIRE_DEBUG
#undef EXPOSURE_PROFILES_DEBUG
#define DEV_CURND_HOSTGEN
#undef EXPOSURE_PROFILES_AGGR_DEBUG
#define EXPECTED_EXPOSURE_DEBUG
#define CONST_MEMORY
#define RNG_HOST_API
#undef RNG_DEV_API
#define UM_HINTS
#define TIME_COUNTERS
#define MULTI_GPU_SIMULATION1
#define OPT_SHARED_MEMORY1
//#define SINGLE_PRECISION
#define DOUBLE_PRECISION
//#define SHARED_MEMORY_OPTIMIZATION
//#define CUDA_SYNCHR_OPTIMIZATION
#define MULTI_GPU_SIMULATION
#define CUDA_SYNC
//#define double double
#define CUDA_RT_CALL(call) \
{ \
hipError_t cudaStatus = call; \
if (hipSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \
}
#define TIMED_RT_CALL(x, y) \
{ \
{auto t_start = std::chrono::high_resolution_clock::now(); \
x; \
auto t_end = std::chrono::high_resolution_clock::now(); \
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count(); \
printf("%s %f (ms) \n", y , elapsed_time_ms); }\
\
}
#define CURAND_CALL(x) \
{ \
if((x)!=HIPRAND_STATUS_SUCCESS) \
printf("ERROR: CURAND call at %s:%d\n",__FILE__,__LINE__);\
\
}
#define CUBLAS_CALL(x) \
{ \
if((x)!=HIPBLAS_STATUS_SUCCESS) \
printf("ERROR: CUBLAS call at %s:%d\n",__FILE__,__LINE__);\
\
}
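// Illustrative use of the error-checking macros above (variable names are
// assumptions, not from this file):
// CUDA_RT_CALL(hipMalloc(&d_rates, N * sizeof(double)));
// CURAND_CALL(hiprandCreateGenerator(&rngGen, HIPRAND_RNG_PSEUDO_MRG32K3A));
// CUBLAS_CALL(hipblasCreate(&cublasHandle));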
#ifdef DOUBLE_PRECISION
__constant__ double d_accrual[TIMEPOINTS];
__constant__ double d_spot_rates[TIMEPOINTS];
__constant__ double d_drifts[TIMEPOINTS];
__constant__ double d_volatilities[VOL_DIM * TIMEPOINTS];
#else
__constant__ float d_accrual[TIMEPOINTS];
__constant__ float d_spot_rates[TIMEPOINTS];
__constant__ float d_drifts[TIMEPOINTS];
__constant__ float d_volatilities[VOL_DIM * TIMEPOINTS];
#endif
/*
* MarketData Struct
*/
struct MarketData {
double* accrual;
double* spot_rates;
double* drifts;
double* volatilities;
};
/*
* CUDA utility function
*/
void cudaMemsetValue(double *buffer, int N, double initial_value) {
thrust::device_ptr<double> dev_ptr(buffer);
thrust::fill(dev_ptr, dev_ptr + N, initial_value);
}
void cudaMemsetValue(float* buffer, int N, float initial_value) {
thrust::device_ptr<float> dev_ptr(buffer);
thrust::fill(dev_ptr, dev_ptr + N, initial_value);
}
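// Example (illustrative): zero the per-timepoint rate accumulators before a
// simulation run, assuming d_accum_rates holds gridSize * TIMEPOINTS doubles.
// cudaMemsetValue(d_accum_rates, gridSize * TIMEPOINTS, 0.0);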
/*
* Musiela Parametrization SDE
* We simulate the SDE f(t+dt)=f(t) + dfbar
* where SDE dfbar = m(t)*dt+SUM(Vol_i*phi[i]*SQRT(dt))+dF/dtau*dt and phi ~ N(0,1)
*/
__device__ __forceinline__ float __musiela_sde2(float drift, float vol0, float vol1, float vol2, float phi0, float phi1, float phi2, float sqrt_dt, float dF, float rate0, float dtau, float dt) {
float vol_sum = vol0 * phi0;
vol_sum += vol1 * phi1;
vol_sum += vol2 * phi2;
vol_sum *= sqrtf(dt);
float dfbar = drift * dt;
dfbar += vol_sum;
dfbar += (dF / dtau) * dt;
// apply Euler-Maruyama
double result = rate0 + dfbar;
return result;
}
template<typename real>
__device__ __forceinline__ double __musiela_sde2(double drift, double vol0, double vol1, double vol2, double phi0, double phi1, double phi2, double sqrt_dt, double dF, double rate0, double dtau, double dt) {
double vol_sum = vol0 * phi0;
vol_sum += vol1 * phi1;
vol_sum += vol2 * phi2;
vol_sum *= sqrt(dt); // double-precision overload: use sqrt, not sqrtf
double dfbar = drift * dt;
dfbar += vol_sum;
dfbar += (dF / dtau) * dt;
// apply Euler-Maruyama
double result = rate0 + dfbar;
return result;
}
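/*
 * Both overloads above implement one Euler-Maruyama step of the discretised
 * Musiela SDE used throughout this file:
 * f_{k+1}(tau) = f_k(tau) + m(tau)*dt
 *              + [vol0(tau)*phi0 + vol1(tau)*phi1 + vol2(tau)*phi2] * sqrt(dt)
 *              + (dF/dtau)*dt
 * where dF is the finite difference of the simulated curve along the tenor axis.
 */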
/**
* RNG init Kernel
*/
#ifdef RNG_HOST_API
void initRNG2_kernel(hiprandGenerator_t generator, double* rngNrmVar, const unsigned int seed, unsigned long long offset, int rnd_count, const double mean, const double stddev)
{
//hiprandGenerator_t generator;
//CURAND_CALL(hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT));
//CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(generator, seed));
//CURAND_CALL(hiprandSetGeneratorOffset(generator, offset));
CURAND_CALL(hiprandGenerateNormalDouble(generator, rngNrmVar, rnd_count, mean, stddev));
CUDA_RT_CALL(hipDeviceSynchronize());
//CURAND_CALL(hiprandDestroyGenerator(generator));
}
void initRNG2_kernel(hiprandGenerator_t generator, float* rngNrmVar, const unsigned int seed, unsigned long long offset, int rnd_count, const double mean, const double stddev)
{
//hiprandGenerator_t generator;
//CURAND_CALL(hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT));
//CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(generator, seed));
//CURAND_CALL(hiprandSetGeneratorOffset(generator, offset));
CURAND_CALL(hiprandGenerateNormal(generator, rngNrmVar, rnd_count, mean, stddev));
CUDA_RT_CALL(hipDeviceSynchronize());
//CURAND_CALL(hiprandDestroyGenerator(generator));
}
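// Illustrative host-side workflow around the helpers above (the generator is
// assumed to be created once by the caller and reused across calls):
//   hiprandGenerator_t gen;
//   CURAND_CALL(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A));
//   CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen, seed));
//   initRNG2_kernel(gen, d_rngNrmVar, seed, 0ULL, rnd_count, 0.0, 1.0);
//   CURAND_CALL(hiprandDestroyGenerator(gen));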
#else
__global__ void initRNG2_kernel(curandStateMRG32k3a* const rngStates, const unsigned int seed, int rnd_count)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
for (; index < rnd_count; index += blockDim.x * gridDim.x) {
hiprand_init(seed, index, 0, &rngStates[index]);
}
}
#endif
/*
* Random initialization on device
*/
__global__ void initRNG2_kernel_ondevice(curandStateMRG32k3a* const rngStates, const unsigned int seed, int rnd_count)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
for (; index < rnd_count; index += blockDim.x * gridDim.x) {
hiprand_init(seed, index, 0, &rngStates[index]);
}
}
__global__ void initRNG(hiprandState_t* const rngStates, const unsigned int seed, int offset)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, tid, offset, &rngStates[tid]);
}
/*
* Monte Carlo HJM Path Generation Constant Memory
*/
__global__
void __generatePaths_kernelOld(double2* numeraires,
void* rngNrmVar,
double* simulated_rates, double* simulated_rates0, double* accum_rates,
const int pathN, int path,
double dtau = 0.5, double dt = 0.01)
{
// calculated rate
double rate;
double sum_rate;
// Simulation Parameters
int stride = dtau / dt; //
const double sqrt_dt = sqrtf(dt);
int t = threadIdx.x;
int gindex = blockIdx.x * TIMEPOINTS + threadIdx.x;
#ifdef RNG_HOST_API
double phi0;
double phi1;
double phi2;
#else
__shared__ double phi0;
__shared__ double phi1;
__shared__ double phi2;
#endif
// Evolve the whole curve from 0 to T ( 1:1 mapping t with threadIdx.x)
if (t < TIMEPOINTS)
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
double dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = simulated_rates[gindex] - simulated_rates[gindex - 1];
}
else {
dF = simulated_rates[gindex + 1] - simulated_rates[gindex];
}
// Normal random variates
#ifdef RNG_HOST_API
double *rngNrms = (double*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + path * VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
#else
if (threadIdx.x == 0) {
curandStateMRG32k3a *state = (curandStateMRG32k3a*) rngNrmVar;
curandStateMRG32k3a localState = state[blockIdx.x];
phi0 = hiprand_uniform(&localState);
phi1 = hiprand_uniform(&localState);
phi2 = hiprand_uniform(&localState);
state[blockIdx.x] = localState;
}
__syncthreads();
#endif
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
simulated_rates[gindex],
dtau,
dt
);
}
#ifdef HJM_PATH_SIMULATION_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f phi0 %f phi1 %f phi2 %f \n", path, blockIdx.x, threadIdx.x, gindex, rate, phi0, phi1, phi2);
#endif
// accumulate rate for discount calculation
sum_rate = accum_rates[gindex];
sum_rate += rate;
accum_rates[gindex] = sum_rate;
// store the simulated rate
simulated_rates0[gindex] = rate; //
// update numeraire based on simulation block
if (path % stride == 0) {
if (t == (path / stride)) {
numeraires[gindex].x = rate;
numeraires[gindex].y = __expf(-sum_rate * dt);
#ifdef HJM_NUMERAIRE_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f Discount %f\n", path, blockIdx.x, threadIdx.x, gindex, rate, __expf(-sum_rate * dt));
#endif
}
}
}
}
/*
* Monte Carlo HJM Path Generation Constant Memory & BlockSize multiple of TIMEPOINTS
*/
template <typename real, typename real2>
__global__
void __generatePaths_kernel(
int numberOfPoints,
real2* numeraires,
real* rngNrmVar,
real* simulated_rates,
real* simulated_rates0,
real* accum_rates,
const int pathN, int path,
real dtau = 0.5, real dt = 0.01)
{
// calculated rate
real rate;
real sum_rate;
#ifdef RNG_HOST_API
real phi0;
real phi1;
real phi2;
#endif
// Simulation Parameters
int stride = dtau / dt; //
real sqrt_dt = sqrtf(dt);
int t = threadIdx.x % TIMEPOINTS;
int gindex = blockIdx.x * numberOfPoints + threadIdx.x;
// Evolve the whole curve from 0 to T ( 1:1 mapping t with threadIdx.x)
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
real dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = simulated_rates[gindex] - simulated_rates[gindex - 1];
}
else {
dF = simulated_rates[gindex + 1] - simulated_rates[gindex];
}
// Normal random variates
#ifdef RNG_HOST_API
real* rngNrms = (real*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + path * VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
#else
if (threadIdx.x == 0) {
curandStateMRG32k3a* state = (curandStateMRG32k3a*)rngNrmVar;
curandStateMRG32k3a localState = state[blockIdx.x];
phi0 = hiprand_uniform(&localState);
phi1 = hiprand_uniform(&localState);
phi2 = hiprand_uniform(&localState);
state[blockIdx.x] = localState;
}
__syncthreads();
#endif
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
simulated_rates[gindex],
dtau,
dt
);
}
#ifdef HJM_PATH_SIMULATION_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f phi0 %f phi1 %f phi2 %f \n", path, blockIdx.x, threadIdx.x, gindex, rate, phi0, phi1, phi2);
#endif
// accumulate rate for discount calculation
sum_rate = accum_rates[gindex];
sum_rate += rate;
accum_rates[gindex] = sum_rate;
// store the simulated rate
simulated_rates0[gindex] = rate; //
// update numeraire based on simulation block
if (path % stride == 0) {
if (t == (path / stride)) {
numeraires[gindex].x = rate;
numeraires[gindex].y = __expf(-sum_rate * dt);
#ifdef HJM_NUMERAIRE_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f Discount %f\n", path, blockIdx.x, threadIdx.x, gindex, rate, __expf(-sum_rate * dt));
#endif
}
}
}
}
/**
* Shared Memory & Global Access Memory optimizations & block simulation
*/
template <typename real, typename real2>
__global__
void __generatePaths_kernel4(
int numberOfPoints,
real2* numeraires,
real* rngNrmVar,
real* simulated_rates,
real* simulated_rates0,
real* accum_rates,
const int pathN,
int path,
real dtau = 0.5, real dt = 0.01)
{
// calculated rate
real rate;
real sum_rate = 0;
real phi0;
real phi1;
real phi2;
__shared__ real _ssimulated_rates[BLOCK_SIZE];
// Simulation Parameters
int stride = dtau / dt; //
real sqrt_dt = sqrtf(dt);
//int t = threadIdx.x % TIMEPOINTS;
int t = threadIdx.x;
int gindex = blockIdx.x * numberOfPoints + threadIdx.x;
// load the accumulated rate for a given timepoint
sum_rate = accum_rates[gindex];
// load the latest simulated rate from global memory
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints)) {
if ( path > 0) {
_ssimulated_rates[threadIdx.x] = simulated_rates[gindex];
}
}
__syncthreads();
//
for (int s = 0; s < stride; s++)
{
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
real dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = _ssimulated_rates[threadIdx.x] - _ssimulated_rates[threadIdx.x - 1];
}
else {
dF = _ssimulated_rates[threadIdx.x + 1] - _ssimulated_rates[threadIdx.x];
}
				// Normal random variates: every thread in the block reads the same index, so the load is served as a broadcast of a single location
real* rngNrms = (real*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + (path + s)* VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
_ssimulated_rates[threadIdx.x],
dtau,
dt
);
}
// accumulate rate for discount calculation
sum_rate += rate;
}
__syncthreads();
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
_ssimulated_rates[threadIdx.x] = rate;
}
__syncthreads();
}
// update the rates and the rate summation for the next simulation block
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
simulated_rates0[gindex] = rate;
accum_rates[gindex] = sum_rate;
}
// update numeraire based on simulation block
if ( t == (path + stride) / stride ) {
numeraires[gindex].x = rate; // forward rate
#ifdef double_ACC
numeraires[gindex].y = expf(-sum_rate * dt);
#else
numeraires[gindex].y = exp(-sum_rate * dt);
#endif
}
}
/*
* Risk Factor Generation block simulation with Shared Memory
*/
void riskFactorSim4(
int gridSize,
int blockSize,
int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
double dtau = 0.5,
double dt = 0.01)
{
int simBlockSize = dtau / dt;
for (int path = 0; path < pathN; path += simBlockSize)
{
hipLaunchKernelGGL(( __generatePaths_kernel4) , dim3(gridSize), dim3(blockSize) , 0, 0,
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
void riskFactorSim4(
int gridSize,
int blockSize,
int numberOfPoints,
float2* numeraires,
float* rngNrmVar,
float* simulated_rates,
float* simulated_rates0,
float* accum_rates,
const int pathN,
float dtau = 0.5,
float dt = 0.01)
{
int simBlockSize = dtau / dt;
for (int path = 0; path < pathN; path += simBlockSize)
{
hipLaunchKernelGGL(( __generatePaths_kernel4) , dim3(gridSize), dim3(blockSize) , 0, 0,
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
/*
* Run the riskFactor Simulation using CUDA Streams
*/
void riskFactorSimStream(int gridSize, int blockSize, int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
int nstreams,
int operPerStream,
hipStream_t* streams,
double dtau = 0.5, double dt = 0.01)
{
int blockPerStream = gridSize / nstreams;
int repBlock = blockPerStream / operPerStream;
int simBlockSize = dtau / dt;
for (int i = 0; i < blockPerStream; i += operPerStream)
{
for (int path = 0; path < pathN; path += simBlockSize)
{
for (int b = 0; b < repBlock; b++)
{
for (int s = 0; s < nstreams; s++)
{
__generatePaths_kernel4 << < repBlock, blockSize, 0, streams[s] >> > (
numberOfPoints,
numeraires + (s * blockPerStream + b * operPerStream) * numberOfPoints,
rngNrmVar + (s * blockPerStream + b * operPerStream) * pathN * 3,
simulated_rates + (s * blockPerStream + b* operPerStream) * numberOfPoints,
simulated_rates0 + (s * blockPerStream + b * operPerStream) * numberOfPoints,
accum_rates,
pathN,
path,
dtau,
dt
);
}
}
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
}
/*
* Risk Factor Generation naive acceleration
*/
void riskFactorSim(
int gridSize,
int blockSize,
int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
double dtau = 0.5,
double dt = 0.01)
{
for (int path = 0; path < pathN; path++)
{
hipLaunchKernelGGL(( __generatePaths_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0,
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
CUDA_RT_CALL(hipDeviceSynchronize());
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
void riskFactorSim(
int gridSize,
int blockSize,
int numberOfPoints,
float2* numeraires,
float* rngNrmVar,
float* simulated_rates,
float* simulated_rates0,
float* accum_rates,
const int pathN,
float dtau = 0.5,
float dt = 0.01)
{
for (int path = 0; path < pathN; path++)
{
__generatePaths_kernel << < gridSize, blockSize >> > (
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
CUDA_RT_CALL(hipDeviceSynchronize());
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
/*
* Exposure generation kernel
* one to one mapping between threadIdx.x and tenor
*/
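// For one scenario (one block) the kernel below prices the remaining swap cash
// flows as cash_flow[t] = discount[t] * notional * accrual[t] * (libor[t] - K),
// where the simple rate is recovered from the continuously compounded forward
// rate as libor = (1/dtau) * (exp(forward_rate * dtau) - 1). The exposure at
// tenor t is the positive part of the sum of the cash flows strictly after t.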
template <typename real, typename real2>
__global__
void _exposure_calc_kernel(real* exposure, real2* numeraires, real notional, real K, int simN, real dtau = 0.5f)
{
__shared__ real cash_flows[TIMEPOINTS];
real discount_factor;
real forward_rate;
real libor;
real cash_flow;
real sum = 0.0;
real m = (1.0 / dtau);
int globaltid = blockIdx.x * TIMEPOINTS + threadIdx.x;
// calculate and load the cash flow in shared memory
if (threadIdx.x < TIMEPOINTS) {
forward_rate = numeraires[globaltid].x;
#ifdef SINGLE_PRECISION
libor = m * (expf(forward_rate / m) - 1.0);
#else
libor = m * (exp(forward_rate / m) - 1.0);
#endif
discount_factor = numeraires[globaltid].y;
cash_flow = discount_factor * notional * d_accrual[threadIdx.x] * (libor - K);
cash_flows[threadIdx.x] = cash_flow;
#ifdef EXPOSURE_PROFILES_DEBUG
printf("Block %d Thread %d Forward Rate %f libor %f Discount %f CashFlow %f \n", blockIdx.x, threadIdx.x, forward_rate, libor, discount_factor, cash_flow);
#endif
}
__syncthreads();
#ifdef EXPOSURE_PROFILES_DEBUG2
if (threadIdx.x == 0) {
for (int t = 0; t < TIMEPOINTS; t++) {
printf("t - indext %d CashFlow %f \n", t, cash_flows[t]);
}
}
#endif
// calculate the exposure profile
if ( threadIdx.x < TIMEPOINTS )
{
for (int t = threadIdx.x + 1; t < TIMEPOINTS; t++) {
sum += cash_flows[t];
}
sum = (sum > 0.0) ? sum : 0.0;
exposure[globaltid] = sum;
#ifdef EXPOSURE_PROFILES_DEBUG
printf("Block %d Thread %d Exposure %f \n", blockIdx.x, threadIdx.x, sum);
#endif
}
__syncthreads();
}
/*
* Exposure calculation
*/
void exposureCalculation(int gridSize, int blockSize, double *d_exposures, double2 *d_numeraire, double notional, double K, int scenarios) {
hipLaunchKernelGGL(( _exposure_calc_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, d_exposures, d_numeraire, notional, K, scenarios);
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
}
void exposureCalculation(int gridSize, int blockSize, float* d_exposures, float2* d_numeraire, float notional, float K, int scenarios) {
_exposure_calc_kernel << < gridSize, blockSize >> > (d_exposures, d_numeraire, notional, K, scenarios);
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
}
/*
* Calculate Expected Exposure Profile
* 2D Aggregation using cublas sgemv
*/
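// The exposures buffer is stored as [scenarios x TIMEPOINTS] in row-major order,
// which the gemv call interprets as a column-major TIMEPOINTS x scenarios matrix.
// Multiplying it by the all-ones vector d_x therefore sums the exposure profiles
// over all scenarios into d_y (one entry per timepoint); the average over
// scenarios is taken later on the host.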
void __expectedexposure_calc_kernel(float* expected_exposure, float* exposures, float *d_x, float *d_y, hipblasHandle_t handle, int exposureCount) {
float alpha = 1.;
float beta = 1. ;
	int cols = TIMEPOINTS;
	int rows = exposureCount;
	// Multiply the exposure matrix by the all-ones vector d_x to sum the per-scenario profiles for each timepoint (column reduction)
CUBLAS_CALL(hipblasSgemv(handle, HIPBLAS_OP_N, cols, rows, &alpha, exposures, cols, d_x, 1, &beta, d_y, 1));
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
#ifdef DEV_CURND_HOSTGEN1
printf("Exposure 2D Matrix Aggregation by Cols \n");
printf("Matrix Cols (%d) Rows(%d) x Vector (%d) in elapsed time %f ms \n", TIMEPOINTS, simN, simN, elapsed_time);
printf("Effective Bandwidth: %f GB/s \n", 2 * TIMEPOINTS * simN * 4 / elapsed_time / 1e6);
#endif
}
void __expectedexposure_calc_kernel(double* expected_exposure, double* exposures, double* d_x, double* d_y, hipblasHandle_t handle, int exposureCount) {
double alpha = 1.;
double beta = 1.;
	int cols = TIMEPOINTS;
	int rows = exposureCount;
	// Multiply the exposure matrix by the all-ones vector d_x to sum the per-scenario profiles for each timepoint (column reduction)
CUBLAS_CALL(hipblasDgemv(handle, HIPBLAS_OP_N, cols, rows, &alpha, exposures, cols, d_x, 1, &beta, d_y, 1));
//CUDA_RT_CALL(hipMemcpy(expected_exposure, d_y, TIMEPOINTS * sizeof(double), hipMemcpyDeviceToHost));
#ifdef CUDA_SYNC
CUDA_RT_CALL(hipDeviceSynchronize());
#endif
#ifdef DEV_CURND_HOSTGEN1
printf("Exposure 2D Matrix Aggregation by Cols \n");
printf("Matrix Cols (%d) Rows(%d) x Vector (%d) in elapsed time %f ms \n", TIMEPOINTS, simN, simN, elapsed_time);
printf("Effective Bandwidth: %f GB/s \n", 2 * TIMEPOINTS * simN * 4 / elapsed_time / 1e6);
#endif
}
/*
Exposure Calculation Kernel Invocation
*/
template <typename real>
void vAdd(int size, real *a, real* b, real* c) {
for (int i = 0; i < size; i++) {
c[i] = a[i] + b[i];
}
}
template <typename real>
void saxpy(int size, real multiplier, real *a, real *b) {
for (int i = 0; i < size; i++) {
b[i] = multiplier * a[i];
}
}
template <typename real, typename real2>
void __calculateExposureMultiGPU(real* expected_exposure, InterestRateSwap<real> payOff, real* accrual, real* spot_rates, real* drifts, real* volatilities, real scale, const int num_gpus, int scenarios, real dt) {
std::vector<real*> rngNrmVar(num_gpus);
const int pathN = payOff.expiry / dt; // 25Y requires 2500 simulations
int scenarios_gpus = scenarios / num_gpus; // total work distribution across gpus
int rnd_count = scenarios_gpus * VOL_DIM * pathN;
const unsigned int seed = 1234ULL;
real mean = 0.0;
real _stddev = 1.0;
const int curveSizeBytes = TIMEPOINTS * sizeof(real); // Total memory occupancy for 51 timepoints
std::cout << scenarios_gpus << " " << num_gpus << " pathN" << pathN << " dt " << dt << std::endl;
// intermediate & final results memory reservation on device data
std::vector<real2*> d_numeraire(num_gpus);
std::vector<real*> d_exposures(num_gpus);
std::vector<real*> simulated_rates(num_gpus);
std::vector<real*> simulated_rates0(num_gpus);
std::vector<real*> accum_rates(num_gpus);
std::vector<real*> d_x(num_gpus);
std::vector<real*> d_y(num_gpus);
std::vector<real*> partial_exposure(num_gpus);
std::vector<hipblasHandle_t> cublas_handle(num_gpus);
std::vector<hiprandGenerator_t> rngs(num_gpus);
std::vector<hiprandGenerator_t> d_rngStates(num_gpus);
roctxRangePush("total_execution_time");
roctxRangePushA("data_initializing");
auto t_start = std::chrono::high_resolution_clock::now();
// memory allocation
#pragma omp parallel num_threads(num_gpus)
//for (int gpuDevice = 0; gpuDevice < num_gpus; gpuDevice++)
{
int gpuDevice = omp_get_thread_num();
hipSetDevice(gpuDevice);
// Reserve on device memory structures
CUDA_RT_CALL(hipMalloc((void**)&simulated_rates[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(hipMalloc((void**)&simulated_rates0[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(hipMalloc((void**)&rngNrmVar[gpuDevice], rnd_count * sizeof(real)));
CUDA_RT_CALL(hipMalloc((void**)&d_numeraire[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real2))); // Numeraire (discount_factor, forward_rates)
CUDA_RT_CALL(hipMalloc((void**)&d_exposures[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real))); // Exposure profiles
CUDA_RT_CALL(hipMalloc((void**)&accum_rates[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(hipMalloc((void**)&d_x[gpuDevice], scenarios_gpus * sizeof(real)));
CUDA_RT_CALL(hipMalloc((void**)&d_y[gpuDevice], TIMEPOINTS * sizeof(real)));
//
unsigned long long offset = gpuDevice * rnd_count;
hiprandGenerator_t rng = rngs[gpuDevice];
CURAND_CALL(hiprandCreateGenerator(&rngs[gpuDevice], HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rngs[gpuDevice], seed));
CURAND_CALL(hiprandSetGeneratorOffset(rngs[gpuDevice], offset));
CUBLAS_CALL(hipblasCreate(&cublas_handle[gpuDevice]));
#ifdef STREAM_ACC
// allocate and initialize an array of stream handles
int nstreams = 16;
hipStream_t* streams = (hipStream_t*)malloc(nstreams * sizeof(hipStream_t));
for (int i = 0; i < nstreams; i++)
{
CUDA_RT_CALL(hipStreamCreate(&(streams[i])));
}
#endif
// Reserve memory for ondevice random generation
//CUDA_RT_CALL(hipMalloc((void**)&d_rngStates[gpuDevice], scenarios_gpus * BLOCK_SIZE * sizeof(hiprandState_t)));
partial_exposure[gpuDevice] = (real*)malloc(TIMEPOINTS * sizeof(real));
	// copy accrual, spot_rates, drifts and volatilities (market data) into device constant memory
CUDA_RT_CALL(hipMemcpyToSymbol(d_accrual, accrual, curveSizeBytes));
CUDA_RT_CALL(hipMemcpyToSymbol(d_spot_rates, spot_rates, curveSizeBytes));
CUDA_RT_CALL(hipMemcpyToSymbol(d_drifts, drifts, curveSizeBytes));
CUDA_RT_CALL(hipMemcpyToSymbol(d_volatilities, volatilities, VOL_DIM * curveSizeBytes));
// initialize array structures
CUDA_RT_CALL(hipMemset(accum_rates[gpuDevice], 0, scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(hipMemset(simulated_rates0[gpuDevice], 0, scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(hipMemset(d_y[gpuDevice], 0, TIMEPOINTS * sizeof(real)));
cudaMemsetValue(d_x[gpuDevice], scenarios_gpus, 1.0f);
}
auto t_end = std::chrono::high_resolution_clock::now();
	double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
	printf("data_initializing %f (ms) \n", elapsed_time_ms);
roctxRangePop();
// random generation
roctxRangePush("gpu_random_generation");
// TODO - Use different streams for each openmp threads (4)
#ifdef MULTI_GPU_SIMULATION
#pragma omp parallel num_threads(num_gpus)
{
int gpuDevice = omp_get_thread_num();
#else
int gpuDevice = 0;
#endif
hipSetDevice(gpuDevice);
	// generate the normal variates for this GPU (each device draws from a distinct generator offset)
unsigned long long offset = gpuDevice * rnd_count;
TIMED_RT_CALL(
initRNG2_kernel(rngs[gpuDevice], rngNrmVar[gpuDevice], seed, offset, rnd_count, mean, _stddev), "normal variate generation"
);
#ifdef MULTI_GPU_SIMULATION
}
#endif
roctxRangePop();
// risk factor evolution
#ifdef MULTI_GPU_SIMULATION
#pragma omp parallel num_threads(num_gpus)
{
int gpuDevice = omp_get_thread_num();
#endif
hipSetDevice(gpuDevice);
// Kernel Execution Parameters
int N = 1;
int blockSize = N * BLOCK_SIZE;
int numberOfPoints = N * TIMEPOINTS;
int gridSize = scenarios_gpus / N;
roctxRangePush("Simulation");
#ifdef SHARED_MEMORY_OPTIMIZATION
TIMED_RT_CALL(
riskFactorSim4(
gridSize,
blockSize,
numberOfPoints,
d_numeraire[gpuDevice],
rngNrmVar[gpuDevice],
simulated_rates[gpuDevice],
simulated_rates0[gpuDevice],
accum_rates[gpuDevice],
pathN,
payOff.dtau,
dt
),
"Execution Time Partial HJM MC simulation"
);
#else
TIMED_RT_CALL(
riskFactorSim(
gridSize,
blockSize,
numberOfPoints,
d_numeraire[gpuDevice],
rngNrmVar[gpuDevice],
simulated_rates[gpuDevice],
simulated_rates0[gpuDevice],
accum_rates[gpuDevice],
pathN,
payOff.dtau,
dt
),
"Execution Time Partial HJM MC simulation"
);
#endif
// TRACE main
roctxRangePop();
roctxRangePush("Pricing");
// Exposure Profile Calculation TODO (d_exposures + gpuDevice * TIMEPOINTS)
// Apply Scan algorithm here
TIMED_RT_CALL(
exposureCalculation(scenarios_gpus, BLOCK_SIZE, d_exposures[gpuDevice], d_numeraire[gpuDevice], payOff.notional, payOff.K, scenarios_gpus),
"exposure calculation"
);
roctxRangePop();
//Replace all this block by a column reduction of the matrix
// Partial Expected Exposure Calculation and scattered across gpus
roctxRangePushA("Exposure");
TIMED_RT_CALL(
__expectedexposure_calc_kernel(partial_exposure[gpuDevice], d_exposures[gpuDevice], d_x[gpuDevice], d_y[gpuDevice], cublas_handle[gpuDevice], scenarios_gpus),
"partial expected exposure profile"
);
roctxRangePop();
#ifdef MULTI_GPU_SIMULATION
}
#endif
roctxRangePop();
roctxRangePush("Expected Exposure");
// collect partial results and reduce them
	memset(expected_exposure, 0, TIMEPOINTS * sizeof(real));
CUDA_RT_CALL(hipMemcpy(partial_exposure[0], d_y[0], TIMEPOINTS * sizeof(real), hipMemcpyDeviceToHost));
#ifdef MULTI_GPU_SIMULATION
for (int gpuDevice = 1; gpuDevice < num_gpus; gpuDevice++) {
CUDA_RT_CALL(hipMemcpy(partial_exposure[gpuDevice], d_y[gpuDevice], TIMEPOINTS * sizeof(real), hipMemcpyDeviceToHost));
#ifdef EXPECTED_EXPOSURE_DEBUG
printf("Exposure Profile\n");
for (int t = 0; t < TIMEPOINTS; t++) {
printf("%1.4f ", partial_exposure[gpuDevice][t]);
}
printf("\n");
#endif
vAdd(TIMEPOINTS, partial_exposure[0], partial_exposure[gpuDevice], partial_exposure[0]);
}
#endif
// average the result partial summation of exposure profiles
real avg = 1.0f / (real)scenarios;
saxpy(TIMEPOINTS, avg, partial_exposure[0], expected_exposure);
roctxRangePop();
// TRACE main
// free up resources
#pragma omp parallel num_threads(num_gpus)
//for (int gpuDevice = 0; gpuDevice < num_gpus; gpuDevice++) {
{
int gpuDevice = omp_get_thread_num();
hipSetDevice(gpuDevice);
#ifdef STREAM_ACC
for (int i = 0; i < nstreams; i++)
{
CUDA_RT_CALL(hipStreamDestroy(streams[i]));
}
#endif
CUBLAS_CALL(hipblasDestroy( cublas_handle[gpuDevice] ));
CURAND_CALL(hiprandDestroyGenerator(rngs[gpuDevice])); //
if (rngNrmVar[gpuDevice]) {
CUDA_RT_CALL(hipFree(rngNrmVar[gpuDevice]));
}
if (d_numeraire[gpuDevice]) {
CUDA_RT_CALL(hipFree(d_numeraire[gpuDevice]));
}
if (d_exposures[gpuDevice]) {
CUDA_RT_CALL(hipFree(d_exposures[gpuDevice]));
}
if (simulated_rates[gpuDevice]) {
CUDA_RT_CALL(hipFree(simulated_rates[gpuDevice]));
}
if (simulated_rates0[gpuDevice]) {
CUDA_RT_CALL(hipFree(simulated_rates0[gpuDevice]));
}
if (accum_rates[gpuDevice]) {
CUDA_RT_CALL(hipFree(accum_rates[gpuDevice]));
}
if (d_x[gpuDevice]) {
CUDA_RT_CALL(hipFree(d_x[gpuDevice]));
}
if (d_y[gpuDevice]) {
CUDA_RT_CALL(hipFree(d_y[gpuDevice]));
}
if (partial_exposure[gpuDevice]) {
free(partial_exposure[gpuDevice]);
}
}
roctxRangePop();
}
void calculateExposureMultiGPU(double* expected_exposure, InterestRateSwap<double> payOff, double* accrual, double* spot_rates, double* drifts, double* volatilities, double scale, const int num_gpus, int scenarios, double dt) {
__calculateExposureMultiGPU<double, double2>(expected_exposure, payOff, accrual, spot_rates, drifts, volatilities, scale, num_gpus, scenarios, dt);
}
void calculateExposureMultiGPU(float* expected_exposure, InterestRateSwap<float> payOff, float* accrual, float* spot_rates, float* drifts, float* volatilities, float scale, const int num_gpus, int scenarios, float dt) {
__calculateExposureMultiGPU<float, float2>(expected_exposure, payOff, accrual, spot_rates, drifts, volatilities, scale, num_gpus, scenarios, dt);
}
| 7f0f874938352deb7fe90c782da32f829bbd4704.cu |
#include <cublas_v2.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include <omp.h>
#include <thread>
#include <thrust\device_vector.h>
#include "mkl.h"
#include <nvtx3\nvToolsExt.h>
#include <iostream>
using std::cout; using std::endl;
using std::chrono::duration_cast;
using std::chrono::milliseconds;
using std::chrono::seconds;
using std::chrono::system_clock;
#include "simulation_gpu.h"
#include "scan_gpu.h"
#define FULL_MASK 0xffffffff
#define TILE_DIM 51
#define TIMEPOINTS 51
#define VOL_DIM 3
#define BLOCKSIZE 32
#define BLOCK_SIZE 64
#define WARPSIZE 32
#define MAX_BLOCK_SZ 256
#define BATCH_SZ 1000
//#define double_ACC 1
//#define EXPECTED_EXPOSURE_DEBUG1 1
#undef HJM_SDE_DEBUG
#define MC_RDM_DEBUG
#undef HJM_PATH_SIMULATION_DEBUG
#undef HJM_NUMERAIRE_DEBUG
#undef EXPOSURE_PROFILES_DEBUG
#define DEV_CURND_HOSTGEN
#undef EXPOSURE_PROFILES_AGGR_DEBUG
#define EXPECTED_EXPOSURE_DEBUG
#define CONST_MEMORY
#define RNG_HOST_API
#undef RNG_DEV_API
#define UM_HINTS
#define TIME_COUNTERS
#define MULTI_GPU_SIMULATION1
#define OPT_SHARED_MEMORY1
//#define SINGLE_PRECISION
#define DOUBLE_PRECISION
//#define SHARED_MEMORY_OPTIMIZATION
//#define CUDA_SYNCHR_OPTIMIZATION
#define MULTI_GPU_SIMULATION
#define CUDA_SYNC
//#define double double
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \
"with " \
"%s (%d).\n", \
#call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \
}
#define TIMED_RT_CALL(x, y) \
{ \
{auto t_start = std::chrono::high_resolution_clock::now(); \
x; \
auto t_end = std::chrono::high_resolution_clock::now(); \
double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count(); \
printf("%s %f (ms) \n", y , elapsed_time_ms); }\
\
}
#define CURAND_CALL(x) \
{ \
if((x)!=CURAND_STATUS_SUCCESS) \
printf("ERROR: CURAND call at %s:%d\n",__FILE__,__LINE__);\
\
}
#define CUBLAS_CALL(x) \
{ \
if((x)!=CUBLAS_STATUS_SUCCESS) \
printf("ERROR: CUBLAS call at %s:%d\n",__FILE__,__LINE__);\
\
}
#ifdef DOUBLE_PRECISION
__constant__ double d_accrual[TIMEPOINTS];
__constant__ double d_spot_rates[TIMEPOINTS];
__constant__ double d_drifts[TIMEPOINTS];
__constant__ double d_volatilities[VOL_DIM * TIMEPOINTS];
#else
__constant__ float d_accrual[TIMEPOINTS];
__constant__ float d_spot_rates[TIMEPOINTS];
__constant__ float d_drifts[TIMEPOINTS];
__constant__ float d_volatilities[VOL_DIM * TIMEPOINTS];
#endif
/*
* MarketData Struct
*/
struct MarketData {
double* accrual;
double* spot_rates;
double* drifts;
double* volatilities;
};
/*
* CUDA utility function
*/
void cudaMemsetValue(double *buffer, int N, double initial_value) {
thrust::device_ptr<double> dev_ptr(buffer);
thrust::fill(dev_ptr, dev_ptr + N, initial_value);
}
void cudaMemsetValue(float* buffer, int N, float initial_value) {
thrust::device_ptr<float> dev_ptr(buffer);
thrust::fill(dev_ptr, dev_ptr + N, initial_value);
}
/*
* Musiela Parametrization SDE
* We simulate the SDE f(t+dt)=f(t) + dfbar
* where SDE dfbar = m(t)*dt+SUM(Vol_i*phi[i]*SQRT(dt))+dF/dtau*dt and phi ~ N(0,1)
*/
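// Discretization implemented by __musiela_sde2 for each tenor point and time step:
//   dfbar  = drift*dt + (vol0*phi0 + vol1*phi1 + vol2*phi2)*sqrt(dt) + (dF/dtau)*dt
//   f(t+dt) = f(t) + dfbar
// where dF is the finite difference of the curve along the tenor axis and
// phi0..phi2 are standard normal draws shared by all tenor points of a scenario
// at a given step.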
__device__ __forceinline__ float __musiela_sde2(float drift, float vol0, float vol1, float vol2, float phi0, float phi1, float phi2, float sqrt_dt, float dF, float rate0, float dtau, float dt) {
float vol_sum = vol0 * phi0;
vol_sum += vol1 * phi1;
vol_sum += vol2 * phi2;
vol_sum *= sqrtf(dt);
float dfbar = drift * dt;
dfbar += vol_sum;
dfbar += (dF / dtau) * dt;
	// apply Euler-Maruyama step
	float result = rate0 + dfbar;
	return result;
}
template<typename real>
__device__ __forceinline__ double __musiela_sde2(double drift, double vol0, double vol1, double vol2, double phi0, double phi1, double phi2, double sqrt_dt, double dF, double rate0, double dtau, double dt) {
double vol_sum = vol0 * phi0;
vol_sum += vol1 * phi1;
vol_sum += vol2 * phi2;
	vol_sum *= sqrt(dt);
	double dfbar = drift * dt;
	dfbar += vol_sum;
	dfbar += (dF / dtau) * dt;
	// apply Euler-Maruyama step
	double result = rate0 + dfbar;
return result;
}
/**
* * RNG init Kernel
*/
#ifdef RNG_HOST_API
void initRNG2_kernel(curandGenerator_t generator, double* rngNrmVar, const unsigned int seed, unsigned long long offset, int rnd_count, const double mean, const double stddev)
{
//curandGenerator_t generator;
//CURAND_CALL(curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT));
//CURAND_CALL(curandSetPseudoRandomGeneratorSeed(generator, seed));
//CURAND_CALL(curandSetGeneratorOffset(generator, offset));
CURAND_CALL(curandGenerateNormalDouble(generator, rngNrmVar, rnd_count, mean, stddev));
CUDA_RT_CALL(cudaDeviceSynchronize());
//CURAND_CALL(curandDestroyGenerator(generator));
}
void initRNG2_kernel(curandGenerator_t generator, float* rngNrmVar, const unsigned int seed, unsigned long long offset, int rnd_count, const double mean, const double stddev)
{
//curandGenerator_t generator;
//CURAND_CALL(curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT));
//CURAND_CALL(curandSetPseudoRandomGeneratorSeed(generator, seed));
//CURAND_CALL(curandSetGeneratorOffset(generator, offset));
CURAND_CALL(curandGenerateNormal(generator, rngNrmVar, rnd_count, mean, stddev));
CUDA_RT_CALL(cudaDeviceSynchronize());
//CURAND_CALL(curandDestroyGenerator(generator));
}
#else
__global__ void initRNG2_kernel(curandStateMRG32k3a* const rngStates, const unsigned int seed, int rnd_count)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
for (; index < rnd_count; index += blockDim.x * gridDim.x) {
curand_init(seed, index, 0, &rngStates[index]);
}
}
#endif
/*
* Random initialization on device
*/
__global__ void initRNG2_kernel_ondevice(curandStateMRG32k3a* const rngStates, const unsigned int seed, int rnd_count)
{
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
for (; index < rnd_count; index += blockDim.x * gridDim.x) {
curand_init(seed, index, 0, &rngStates[index]);
}
}
__global__ void initRNG(curandState* const rngStates, const unsigned int seed, int offset)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, tid, offset, &rngStates[tid]);
}
/*
* Monte Carlo HJM Path Generation Constant Memory
*/
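// Baseline double-precision variant: one block per scenario, one thread per
// tenor point, each launch advancing the whole curve by a single time step dt.
// The host wrappers below launch the templated __generatePaths_kernel and
// __generatePaths_kernel4 instead of this one.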
__global__
void __generatePaths_kernelOld(double2* numeraires,
void* rngNrmVar,
double* simulated_rates, double* simulated_rates0, double* accum_rates,
const int pathN, int path,
double dtau = 0.5, double dt = 0.01)
{
// calculated rate
double rate;
double sum_rate;
// Simulation Parameters
int stride = dtau / dt; //
const double sqrt_dt = sqrtf(dt);
int t = threadIdx.x;
int gindex = blockIdx.x * TIMEPOINTS + threadIdx.x;
#ifdef RNG_HOST_API
double phi0;
double phi1;
double phi2;
#else
__shared__ double phi0;
__shared__ double phi1;
__shared__ double phi2;
#endif
// Evolve the whole curve from 0 to T ( 1:1 mapping t with threadIdx.x)
if (t < TIMEPOINTS)
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
double dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = simulated_rates[gindex] - simulated_rates[gindex - 1];
}
else {
dF = simulated_rates[gindex + 1] - simulated_rates[gindex];
}
// Normal random variates
#ifdef RNG_HOST_API
double *rngNrms = (double*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + path * VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
#else
if (threadIdx.x == 0) {
curandStateMRG32k3a *state = (curandStateMRG32k3a*) rngNrmVar;
curandStateMRG32k3a localState = state[blockIdx.x];
phi0 = curand_uniform(&localState);
phi1 = curand_uniform(&localState);
phi2 = curand_uniform(&localState);
state[blockIdx.x] = localState;
}
__syncthreads();
#endif
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
simulated_rates[gindex],
dtau,
dt
);
}
#ifdef HJM_PATH_SIMULATION_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f phi0 %f phi1 %f phi2 %f \n", path, blockIdx.x, threadIdx.x, gindex, rate, phi0, phi1, phi2);
#endif
// accumulate rate for discount calculation
sum_rate = accum_rates[gindex];
sum_rate += rate;
accum_rates[gindex] = sum_rate;
// store the simulated rate
simulated_rates0[gindex] = rate; //
// update numeraire based on simulation block
if (path % stride == 0) {
if (t == (path / stride)) {
numeraires[gindex].x = rate;
numeraires[gindex].y = __expf(-sum_rate * dt);
#ifdef HJM_NUMERAIRE_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f Discount %f\n", path, blockIdx.x, threadIdx.x, gindex, rate, __expf(-sum_rate * dt));
#endif
}
}
}
}
/*
* Monte Carlo HJM Path Generation Constant Memory & BlockSize multiple of TIMEPOINTS
*/
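// Generalization of the kernel above: a block may pack N curves back to back
// (numberOfPoints = N * TIMEPOINTS) and the tenor index is recovered as
// threadIdx.x % TIMEPOINTS; the host code in this file launches it with N = 1.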
template <typename real, typename real2>
__global__
void __generatePaths_kernel(
int numberOfPoints,
real2* numeraires,
real* rngNrmVar,
real* simulated_rates,
real* simulated_rates0,
real* accum_rates,
const int pathN, int path,
real dtau = 0.5, double dt = 0.01)
{
// calculated rate
real rate;
real sum_rate;
#ifdef RNG_HOST_API
real phi0;
real phi1;
real phi2;
#endif
// Simulation Parameters
int stride = dtau / dt; //
real sqrt_dt = sqrtf(dt);
int t = threadIdx.x % TIMEPOINTS;
int gindex = blockIdx.x * numberOfPoints + threadIdx.x;
// Evolve the whole curve from 0 to T ( 1:1 mapping t with threadIdx.x)
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
real dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = simulated_rates[gindex] - simulated_rates[gindex - 1];
}
else {
dF = simulated_rates[gindex + 1] - simulated_rates[gindex];
}
// Normal random variates
#ifdef RNG_HOST_API
real* rngNrms = (real*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + path * VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
#else
if (threadIdx.x == 0) {
curandStateMRG32k3a* state = (curandStateMRG32k3a*)rngNrmVar;
curandStateMRG32k3a localState = state[blockIdx.x];
phi0 = curand_uniform(&localState);
phi1 = curand_uniform(&localState);
phi2 = curand_uniform(&localState);
state[blockIdx.x] = localState;
}
__syncthreads();
#endif
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
simulated_rates[gindex],
dtau,
dt
);
}
#ifdef HJM_PATH_SIMULATION_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f phi0 %f phi1 %f phi2 %f \n", path, blockIdx.x, threadIdx.x, gindex, rate, phi0, phi1, phi2);
#endif
// accumulate rate for discount calculation
sum_rate = accum_rates[gindex];
sum_rate += rate;
accum_rates[gindex] = sum_rate;
// store the simulated rate
simulated_rates0[gindex] = rate; //
// update numeraire based on simulation block
if (path % stride == 0) {
if (t == (path / stride)) {
numeraires[gindex].x = rate;
numeraires[gindex].y = __expf(-sum_rate * dt);
#ifdef HJM_NUMERAIRE_DEBUG
printf("Path %d Block %d Thread %d index %d Forward Rate %f Discount %f\n", path, blockIdx.x, threadIdx.x, gindex, rate, __expf(-sum_rate * dt));
#endif
}
}
}
}
/**
 * Shared Memory & Global Memory Access optimizations & block simulation
*/
template <typename real, typename real2>
__global__
void __generatePaths_kernel4(
int numberOfPoints,
real2* numeraires,
real* rngNrmVar,
real* simulated_rates,
real* simulated_rates0,
real* accum_rates,
const int pathN,
int path,
real dtau = 0.5, real dt = 0.01)
{
// calculated rate
real rate;
real sum_rate = 0;
real phi0;
real phi1;
real phi2;
__shared__ real _ssimulated_rates[BLOCK_SIZE];
// Simulation Parameters
int stride = dtau / dt; //
real sqrt_dt = sqrtf(dt);
//int t = threadIdx.x % TIMEPOINTS;
int t = threadIdx.x;
int gindex = blockIdx.x * numberOfPoints + threadIdx.x;
// load the accumulated rate for a given timepoint
sum_rate = accum_rates[gindex];
// load the latest simulated rate from global memory
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints)) {
if ( path > 0) {
_ssimulated_rates[threadIdx.x] = simulated_rates[gindex];
}
}
__syncthreads();
//
for (int s = 0; s < stride; s++)
{
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
if (path == 0) {
rate = d_spot_rates[t];
}
else {
// Calculate dF term in Musiela Parametrization SDE
real dF = 0;
if (t == (TIMEPOINTS - 1)) {
dF = _ssimulated_rates[threadIdx.x] - _ssimulated_rates[threadIdx.x - 1];
}
else {
dF = _ssimulated_rates[threadIdx.x + 1] - _ssimulated_rates[threadIdx.x];
}
				// Normal random variates: every thread in the block reads the same index, so the load is served as a broadcast of a single location
real* rngNrms = (real*)rngNrmVar;
int rndIdx = blockIdx.x * pathN * VOL_DIM + (path + s)* VOL_DIM;
phi0 = rngNrms[rndIdx];
phi1 = rngNrms[rndIdx + 1];
phi2 = rngNrms[rndIdx + 2];
// simulate the sde
rate = __musiela_sde2(
d_drifts[t],
d_volatilities[t],
d_volatilities[TIMEPOINTS + t],
d_volatilities[TIMEPOINTS * 2 + t],
phi0,
phi1,
phi2,
sqrt_dt,
dF,
_ssimulated_rates[threadIdx.x],
dtau,
dt
);
}
// accumulate rate for discount calculation
sum_rate += rate;
}
__syncthreads();
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
_ssimulated_rates[threadIdx.x] = rate;
}
__syncthreads();
}
// update the rates and the rate summation for the next simulation block
if ((threadIdx.x < numberOfPoints) && (gindex < gridDim.x * numberOfPoints))
{
simulated_rates0[gindex] = rate;
accum_rates[gindex] = sum_rate;
}
// update numeraire based on simulation block
if ( t == (path + stride) / stride ) {
numeraires[gindex].x = rate; // forward rate
#ifdef double_ACC
numeraires[gindex].y = expf(-sum_rate * dt);
#else
numeraires[gindex].y = exp(-sum_rate * dt);
#endif
}
}
/*
* Risk Factor Generation block simulation with Shared Memory
*/
void riskFactorSim4(
int gridSize,
int blockSize,
int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
double dtau = 0.5,
double dt = 0.01)
{
int simBlockSize = dtau / dt;
for (int path = 0; path < pathN; path += simBlockSize)
{
__generatePaths_kernel4 <<< gridSize, blockSize >>> (
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
void riskFactorSim4(
int gridSize,
int blockSize,
int numberOfPoints,
float2* numeraires,
float* rngNrmVar,
float* simulated_rates,
float* simulated_rates0,
float* accum_rates,
const int pathN,
float dtau = 0.5,
float dt = 0.01)
{
int simBlockSize = dtau / dt;
for (int path = 0; path < pathN; path += simBlockSize)
{
__generatePaths_kernel4 <<< gridSize, blockSize >>> (
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
/*
* Run the riskFactor Simulation using CUDA Streams
*/
void riskFactorSimStream(int gridSize, int blockSize, int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
int nstreams,
int operPerStream,
cudaStream_t* streams,
double dtau = 0.5, double dt = 0.01)
{
int blockPerStream = gridSize / nstreams;
int repBlock = blockPerStream / operPerStream;
int simBlockSize = dtau / dt;
for (int i = 0; i < blockPerStream; i += operPerStream)
{
for (int path = 0; path < pathN; path += simBlockSize)
{
for (int b = 0; b < repBlock; b++)
{
for (int s = 0; s < nstreams; s++)
{
__generatePaths_kernel4 << < repBlock, blockSize, 0, streams[s] >> > (
numberOfPoints,
numeraires + (s * blockPerStream + b * operPerStream) * numberOfPoints,
rngNrmVar + (s * blockPerStream + b * operPerStream) * pathN * 3,
simulated_rates + (s * blockPerStream + b* operPerStream) * numberOfPoints,
simulated_rates0 + (s * blockPerStream + b * operPerStream) * numberOfPoints,
accum_rates,
pathN,
path,
dtau,
dt
);
}
}
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
}
/*
* Risk Factor Generation naive acceleration
*/
void riskFactorSim(
int gridSize,
int blockSize,
int numberOfPoints,
double2* numeraires,
double* rngNrmVar,
double* simulated_rates,
double* simulated_rates0,
double* accum_rates,
const int pathN,
double dtau = 0.5,
double dt = 0.01)
{
for (int path = 0; path < pathN; path++)
{
__generatePaths_kernel <<< gridSize, blockSize >>> (
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
CUDA_RT_CALL(cudaDeviceSynchronize());
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
void riskFactorSim(
int gridSize,
int blockSize,
int numberOfPoints,
float2* numeraires,
float* rngNrmVar,
float* simulated_rates,
float* simulated_rates0,
float* accum_rates,
const int pathN,
float dtau = 0.5,
float dt = 0.01)
{
for (int path = 0; path < pathN; path++)
{
__generatePaths_kernel << < gridSize, blockSize >> > (
numberOfPoints,
numeraires,
rngNrmVar,
simulated_rates,
simulated_rates0,
accum_rates,
pathN,
path,
dtau,
dt
);
CUDA_RT_CALL(cudaDeviceSynchronize());
// update simulated rates (swap pointers)
std::swap(simulated_rates, simulated_rates0);
}
}
/*
* Exposure generation kernel
* one to one mapping between threadIdx.x and tenor
*/
template <typename real, typename real2>
__global__
void _exposure_calc_kernel(real* exposure, real2* numeraires, real notional, real K, int simN, real dtau = 0.5f)
{
__shared__ real cash_flows[TIMEPOINTS];
real discount_factor;
real forward_rate;
real libor;
real cash_flow;
real sum = 0.0;
real m = (1.0 / dtau);
int globaltid = blockIdx.x * TIMEPOINTS + threadIdx.x;
// calculate and load the cash flow in shared memory
if (threadIdx.x < TIMEPOINTS) {
forward_rate = numeraires[globaltid].x;
#ifdef SINGLE_PRECISION
libor = m * (expf(forward_rate / m) - 1.0);
#else
libor = m * (exp(forward_rate / m) - 1.0);
#endif
discount_factor = numeraires[globaltid].y;
cash_flow = discount_factor * notional * d_accrual[threadIdx.x] * (libor - K);
cash_flows[threadIdx.x] = cash_flow;
#ifdef EXPOSURE_PROFILES_DEBUG
printf("Block %d Thread %d Forward Rate %f libor %f Discount %f CashFlow %f \n", blockIdx.x, threadIdx.x, forward_rate, libor, discount_factor, cash_flow);
#endif
}
__syncthreads();
#ifdef EXPOSURE_PROFILES_DEBUG2
if (threadIdx.x == 0) {
for (int t = 0; t < TIMEPOINTS; t++) {
printf("t - indext %d CashFlow %f \n", t, cash_flows[t]);
}
}
#endif
// calculate the exposure profile
if ( threadIdx.x < TIMEPOINTS )
{
for (int t = threadIdx.x + 1; t < TIMEPOINTS; t++) {
sum += cash_flows[t];
}
sum = (sum > 0.0) ? sum : 0.0;
exposure[globaltid] = sum;
#ifdef EXPOSURE_PROFILES_DEBUG
printf("Block %d Thread %d Exposure %f \n", blockIdx.x, threadIdx.x, sum);
#endif
}
__syncthreads();
}
/*
* Exposure calculation
*/
void exposureCalculation(int gridSize, int blockSize, double *d_exposures, double2 *d_numeraire, double notional, double K, int scenarios) {
_exposure_calc_kernel <<< gridSize, blockSize >>> (d_exposures, d_numeraire, notional, K, scenarios);
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
}
void exposureCalculation(int gridSize, int blockSize, float* d_exposures, float2* d_numeraire, float notional, float K, int scenarios) {
_exposure_calc_kernel << < gridSize, blockSize >> > (d_exposures, d_numeraire, notional, K, scenarios);
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
}
/*
* Calculate Expected Exposure Profile
* 2D Aggregation using cublas sgemv
*/
void __expectedexposure_calc_kernel(float* expected_exposure, float* exposures, float *d_x, float *d_y, cublasHandle_t handle, int exposureCount) {
float alpha = 1.;
float beta = 1. ;
	int cols = TIMEPOINTS;
	int rows = exposureCount;
	// Multiply the exposure matrix by the all-ones vector d_x to sum the per-scenario profiles for each timepoint (column reduction)
CUBLAS_CALL(cublasSgemv(handle, CUBLAS_OP_N, cols, rows, &alpha, exposures, cols, d_x, 1, &beta, d_y, 1));
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
#ifdef DEV_CURND_HOSTGEN1
printf("Exposure 2D Matrix Aggregation by Cols \n");
printf("Matrix Cols (%d) Rows(%d) x Vector (%d) in elapsed time %f ms \n", TIMEPOINTS, simN, simN, elapsed_time);
printf("Effective Bandwidth: %f GB/s \n", 2 * TIMEPOINTS * simN * 4 / elapsed_time / 1e6);
#endif
}
void __expectedexposure_calc_kernel(double* expected_exposure, double* exposures, double* d_x, double* d_y, cublasHandle_t handle, int exposureCount) {
double alpha = 1.;
double beta = 1.;
	int cols = TIMEPOINTS;
	int rows = exposureCount;
	// Multiply the exposure matrix by the all-ones vector d_x to sum the per-scenario profiles for each timepoint (column reduction)
CUBLAS_CALL(cublasDgemv(handle, CUBLAS_OP_N, cols, rows, &alpha, exposures, cols, d_x, 1, &beta, d_y, 1));
//CUDA_RT_CALL(cudaMemcpy(expected_exposure, d_y, TIMEPOINTS * sizeof(double), cudaMemcpyDeviceToHost));
#ifdef CUDA_SYNC
CUDA_RT_CALL(cudaDeviceSynchronize());
#endif
#ifdef DEV_CURND_HOSTGEN1
printf("Exposure 2D Matrix Aggregation by Cols \n");
printf("Matrix Cols (%d) Rows(%d) x Vector (%d) in elapsed time %f ms \n", TIMEPOINTS, simN, simN, elapsed_time);
printf("Effective Bandwidth: %f GB/s \n", 2 * TIMEPOINTS * simN * 4 / elapsed_time / 1e6);
#endif
}
/*
Exposure Calculation Kernel Invocation
*/
template <typename real>
void vAdd(int size, real *a, real* b, real* c) {
for (int i = 0; i < size; i++) {
c[i] = a[i] + b[i];
}
}
template <typename real>
void saxpy(int size, real multiplier, real *a, real *b) {
for (int i = 0; i < size; i++) {
b[i] = multiplier * a[i];
}
}
template <typename real, typename real2>
void __calculateExposureMultiGPU(real* expected_exposure, InterestRateSwap<real> payOff, real* accrual, real* spot_rates, real* drifts, real* volatilities, real scale, const int num_gpus, int scenarios, real dt) {
std::vector<real*> rngNrmVar(num_gpus);
const int pathN = payOff.expiry / dt; // 25Y requires 2500 simulations
int scenarios_gpus = scenarios / num_gpus; // total work distribution across gpus
int rnd_count = scenarios_gpus * VOL_DIM * pathN;
const unsigned int seed = 1234ULL;
real mean = 0.0;
real _stddev = 1.0;
const int curveSizeBytes = TIMEPOINTS * sizeof(real); // Total memory occupancy for 51 timepoints
std::cout << scenarios_gpus << " " << num_gpus << " pathN" << pathN << " dt " << dt << std::endl;
// intermediate & final results memory reservation on device data
std::vector<real2*> d_numeraire(num_gpus);
std::vector<real*> d_exposures(num_gpus);
std::vector<real*> simulated_rates(num_gpus);
std::vector<real*> simulated_rates0(num_gpus);
std::vector<real*> accum_rates(num_gpus);
std::vector<real*> d_x(num_gpus);
std::vector<real*> d_y(num_gpus);
std::vector<real*> partial_exposure(num_gpus);
std::vector<cublasHandle_t> cublas_handle(num_gpus);
std::vector<curandGenerator_t> rngs(num_gpus);
std::vector<curandGenerator_t> d_rngStates(num_gpus);
nvtxRangePush("total_execution_time");
nvtxRangePushA("data_initializing");
auto t_start = std::chrono::high_resolution_clock::now();
// memory allocation
#pragma omp parallel num_threads(num_gpus)
//for (int gpuDevice = 0; gpuDevice < num_gpus; gpuDevice++)
{
int gpuDevice = omp_get_thread_num();
cudaSetDevice(gpuDevice);
// Reserve on device memory structures
CUDA_RT_CALL(cudaMalloc((void**)&simulated_rates[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(cudaMalloc((void**)&simulated_rates0[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(cudaMalloc((void**)&rngNrmVar[gpuDevice], rnd_count * sizeof(real)));
CUDA_RT_CALL(cudaMalloc((void**)&d_numeraire[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real2))); // Numeraire (discount_factor, forward_rates)
CUDA_RT_CALL(cudaMalloc((void**)&d_exposures[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real))); // Exposure profiles
CUDA_RT_CALL(cudaMalloc((void**)&accum_rates[gpuDevice], scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(cudaMalloc((void**)&d_x[gpuDevice], scenarios_gpus * sizeof(real)));
CUDA_RT_CALL(cudaMalloc((void**)&d_y[gpuDevice], TIMEPOINTS * sizeof(real)));
//
unsigned long long offset = gpuDevice * rnd_count;
curandGenerator_t rng = rngs[gpuDevice];
CURAND_CALL(curandCreateGenerator(&rngs[gpuDevice], CURAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rngs[gpuDevice], seed));
CURAND_CALL(curandSetGeneratorOffset(rngs[gpuDevice], offset));
CUBLAS_CALL(cublasCreate(&cublas_handle[gpuDevice]));
#ifdef STREAM_ACC
// allocate and initialize an array of stream handles
int nstreams = 16;
cudaStream_t* streams = (cudaStream_t*)malloc(nstreams * sizeof(cudaStream_t));
for (int i = 0; i < nstreams; i++)
{
CUDA_RT_CALL(cudaStreamCreate(&(streams[i])));
}
#endif
// Reserve memory for ondevice random generation
//CUDA_RT_CALL(cudaMalloc((void**)&d_rngStates[gpuDevice], scenarios_gpus * BLOCK_SIZE * sizeof(curandState)));
partial_exposure[gpuDevice] = (real*)malloc(TIMEPOINTS * sizeof(real));
	// copy accrual, spot_rates, drifts and volatilities (market data) into device constant memory
CUDA_RT_CALL(cudaMemcpyToSymbol(d_accrual, accrual, curveSizeBytes));
CUDA_RT_CALL(cudaMemcpyToSymbol(d_spot_rates, spot_rates, curveSizeBytes));
CUDA_RT_CALL(cudaMemcpyToSymbol(d_drifts, drifts, curveSizeBytes));
CUDA_RT_CALL(cudaMemcpyToSymbol(d_volatilities, volatilities, VOL_DIM * curveSizeBytes));
// initialize array structures
CUDA_RT_CALL(cudaMemset(accum_rates[gpuDevice], 0, scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(cudaMemset(simulated_rates0[gpuDevice], 0, scenarios_gpus * TIMEPOINTS * sizeof(real)));
CUDA_RT_CALL(cudaMemset(d_y[gpuDevice], 0, TIMEPOINTS * sizeof(real)));
cudaMemsetValue(d_x[gpuDevice], scenarios_gpus, 1.0f);
}
auto t_end = std::chrono::high_resolution_clock::now();
	double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
	printf("data_initializing %f (ms) \n", elapsed_time_ms);
nvtxRangePop();
// random generation
nvtxRangePush("gpu_random_generation");
// TODO - Use different streams for each openmp threads (4)
#ifdef MULTI_GPU_SIMULATION
#pragma omp parallel num_threads(num_gpus)
{
int gpuDevice = omp_get_thread_num();
#else
int gpuDevice = 0;
#endif
cudaSetDevice(gpuDevice);
	// generate the normal variates for this GPU (each device draws from a distinct generator offset)
unsigned long long offset = gpuDevice * rnd_count;
TIMED_RT_CALL(
initRNG2_kernel(rngs[gpuDevice], rngNrmVar[gpuDevice], seed, offset, rnd_count, mean, _stddev), "normal variate generation"
);
#ifdef MULTI_GPU_SIMULATION
}
#endif
nvtxRangePop();
// risk factor evolution
#ifdef MULTI_GPU_SIMULATION
#pragma omp parallel num_threads(num_gpus)
{
int gpuDevice = omp_get_thread_num();
#endif
cudaSetDevice(gpuDevice);
// Kernel Execution Parameters
int N = 1;
int blockSize = N * BLOCK_SIZE;
int numberOfPoints = N * TIMEPOINTS;
int gridSize = scenarios_gpus / N;
nvtxRangePush("Simulation");
#ifdef SHARED_MEMORY_OPTIMIZATION
TIMED_RT_CALL(
riskFactorSim4(
gridSize,
blockSize,
numberOfPoints,
d_numeraire[gpuDevice],
rngNrmVar[gpuDevice],
simulated_rates[gpuDevice],
simulated_rates0[gpuDevice],
accum_rates[gpuDevice],
pathN,
payOff.dtau,
dt
),
"Execution Time Partial HJM MC simulation"
);
#else
TIMED_RT_CALL(
riskFactorSim(
gridSize,
blockSize,
numberOfPoints,
d_numeraire[gpuDevice],
rngNrmVar[gpuDevice],
simulated_rates[gpuDevice],
simulated_rates0[gpuDevice],
accum_rates[gpuDevice],
pathN,
payOff.dtau,
dt
),
"Execution Time Partial HJM MC simulation"
);
#endif
// TRACE main
nvtxRangePop();
nvtxRangePush("Pricing");
// Exposure Profile Calculation TODO (d_exposures + gpuDevice * TIMEPOINTS)
// Apply Scan algorithm here
TIMED_RT_CALL(
exposureCalculation(scenarios_gpus, BLOCK_SIZE, d_exposures[gpuDevice], d_numeraire[gpuDevice], payOff.notional, payOff.K, scenarios_gpus),
"exposure calculation"
);
nvtxRangePop();
//Replace all this block by a column reduction of the matrix
// Partial Expected Exposure Calculation and scattered across gpus
nvtxRangePushA("Exposure");
TIMED_RT_CALL(
__expectedexposure_calc_kernel(partial_exposure[gpuDevice], d_exposures[gpuDevice], d_x[gpuDevice], d_y[gpuDevice], cublas_handle[gpuDevice], scenarios_gpus),
"partial expected exposure profile"
);
nvtxRangePop();
#ifdef MULTI_GPU_SIMULATION
}
#endif
nvtxRangePop();
nvtxRangePush("Expected Exposure");
// collect partial results and reduce them
	memset(expected_exposure, 0, TIMEPOINTS * sizeof(real));
CUDA_RT_CALL(cudaMemcpy(partial_exposure[0], d_y[0], TIMEPOINTS * sizeof(real), cudaMemcpyDeviceToHost));
#ifdef MULTI_GPU_SIMULATION
for (int gpuDevice = 1; gpuDevice < num_gpus; gpuDevice++) {
CUDA_RT_CALL(cudaMemcpy(partial_exposure[gpuDevice], d_y[gpuDevice], TIMEPOINTS * sizeof(real), cudaMemcpyDeviceToHost));
#ifdef EXPECTED_EXPOSURE_DEBUG
printf("Exposure Profile\n");
for (int t = 0; t < TIMEPOINTS; t++) {
printf("%1.4f ", partial_exposure[gpuDevice][t]);
}
printf("\n");
#endif
vAdd(TIMEPOINTS, partial_exposure[0], partial_exposure[gpuDevice], partial_exposure[0]);
}
#endif
// average the result partial summation of exposure profiles
real avg = 1.0f / (real)scenarios;
saxpy(TIMEPOINTS, avg, partial_exposure[0], expected_exposure);
nvtxRangePop();
// TRACE main
// free up resources
#pragma omp parallel num_threads(num_gpus)
//for (int gpuDevice = 0; gpuDevice < num_gpus; gpuDevice++) {
{
int gpuDevice = omp_get_thread_num();
cudaSetDevice(gpuDevice);
#ifdef STREAM_ACC
for (int i = 0; i < nstreams; i++)
{
CUDA_RT_CALL(cudaStreamDestroy(streams[i]));
}
#endif
CUBLAS_CALL(cublasDestroy( cublas_handle[gpuDevice] ));
CURAND_CALL(curandDestroyGenerator(rngs[gpuDevice])); //
if (rngNrmVar[gpuDevice]) {
CUDA_RT_CALL(cudaFree(rngNrmVar[gpuDevice]));
}
if (d_numeraire[gpuDevice]) {
CUDA_RT_CALL(cudaFree(d_numeraire[gpuDevice]));
}
if (d_exposures[gpuDevice]) {
CUDA_RT_CALL(cudaFree(d_exposures[gpuDevice]));
}
if (simulated_rates[gpuDevice]) {
CUDA_RT_CALL(cudaFree(simulated_rates[gpuDevice]));
}
if (simulated_rates0[gpuDevice]) {
CUDA_RT_CALL(cudaFree(simulated_rates0[gpuDevice]));
}
if (accum_rates[gpuDevice]) {
CUDA_RT_CALL(cudaFree(accum_rates[gpuDevice]));
}
if (d_x[gpuDevice]) {
CUDA_RT_CALL(cudaFree(d_x[gpuDevice]));
}
if (d_y[gpuDevice]) {
CUDA_RT_CALL(cudaFree(d_y[gpuDevice]));
}
if (partial_exposure[gpuDevice]) {
free(partial_exposure[gpuDevice]);
}
}
nvtxRangePop();
}
void calculateExposureMultiGPU(double* expected_exposure, InterestRateSwap<double> payOff, double* accrual, double* spot_rates, double* drifts, double* volatilities, double scale, const int num_gpus, int scenarios, double dt) {
__calculateExposureMultiGPU<double, double2>(expected_exposure, payOff, accrual, spot_rates, drifts, volatilities, scale, num_gpus, scenarios, dt);
}
void calculateExposureMultiGPU(float* expected_exposure, InterestRateSwap<float> payOff, float* accrual, float* spot_rates, float* drifts, float* volatilities, float scale, const int num_gpus, int scenarios, float dt) {
__calculateExposureMultiGPU<float, float2>(expected_exposure, payOff, accrual, spot_rates, drifts, volatilities, scale, num_gpus, scenarios, dt);
}
|
b58e72479cccb9fe6bbe2253de419777f4f4c4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/cudnn/cudnn.hpp>
#include <nbla/cuda/cudnn/function/warp_by_grid.hpp>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T>
__global__ void kernel_warp_by_grid_add(const int size, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, size) { y[idx] += x[idx]; }
}
template <typename T>
void WarpByGridCudaCudnn<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
WarpByGridCuda<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
return;
}
int B = oshape[0];
int C = oshape[1];
int Ho = oshape[2];
int Wo = oshape[3];
vector<int> dimA{B, C, Ho, Wo};
NBLA_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(
spatial_tf_desc_, CUDNN_SAMPLER_BILINEAR, cudnn_data_type<T>::type(), 4,
dimA.data()));
auto ishape = inputs[0]->shape();
int Hi = ishape[2];
int Wi = ishape[3];
vector<int> dimX{B, C, Hi, Wi};
cudnn_set_tensor_nd_descriptor_force_dim(x_desc_, cudnn_data_type<T>::type(),
dimX, dimX.size(),
this->channel_last_, false);
vector<int> dimY{B, C, Ho, Wo};
cudnn_set_tensor_nd_descriptor_force_dim(y_desc_, cudnn_data_type<T>::type(),
dimY, dimY.size(),
this->channel_last_, false);
}
template <typename T>
void WarpByGridCudaCudnn<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
WarpByGridCuda<T>::forward_impl(inputs, outputs);
return;
}
cudnnHandle_t cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
auto x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
auto y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerForward(cudnn_handle, spatial_tf_desc_,
&alpha, x_desc_, x, grid, &beta,
y_desc_, y));
}
template <typename T>
void WarpByGridCudaCudnn<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1])) {
return;
}
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
#if defined(_WIN32) || defined(_WIN64)
WarpByGridCuda<T>::backward_impl(inputs, outputs, propagate_down, accum);
return;
#endif
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
WarpByGridCuda<T>::backward_impl(inputs, outputs, propagate_down, accum);
return;
}
cudnnHandle_t cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const auto x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const auto y = outputs[0]->get_data_pointer<Tcu>(this->ctx_);
const auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
auto g_alpha = get_cudnn_scalar_arg<T>(1);
auto g_beta = get_cudnn_scalar_arg<T>(0);
auto x_size = inputs[0]->size();
auto g_size = inputs[1]->size();
// beta 1 seems not working
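  // Rather than relying on cuDNN's beta accumulation, gradients that must be
  // accumulated are first written into freshly allocated temporary Variables and
  // then added to the existing gradients with kernel_warp_by_grid_add; when
  // accum is false the gradient is written directly by
  // cudnnSpatialTfSamplerBackward.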
if (propagate_down[0] && propagate_down[1]) {
if (!accum[0] && !accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
} else if (!accum[0] && accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
} else if (accum[0] && !accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
} else if (accum[0] && accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
}
} else if (propagate_down[0] && !propagate_down[1]) {
auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
if (!accum[0]) {
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid_tmp_ptr0));
} else {
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
}
} else if (!propagate_down[0] && propagate_down[1]) {
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
if (!accum[1]) {
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
} else {
      auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
}
}
}
} // namespace nbla
| b58e72479cccb9fe6bbe2253de419777f4f4c4d5.cu | // Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/cudnn/cudnn.hpp>
#include <nbla/cuda/cudnn/function/warp_by_grid.hpp>
#include <nbla/variable.hpp>
namespace nbla {
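// Element-wise accumulation kernel (y += x). It is used in backward_impl to add
// gradients computed into temporary buffers onto the existing gradient buffers.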
template <typename T>
__global__ void kernel_warp_by_grid_add(const int size, T *y, const T *x) {
NBLA_CUDA_KERNEL_LOOP(idx, size) { y[idx] += x[idx]; }
}
template <typename T>
void WarpByGridCudaCudnn<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
WarpByGridCuda<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
return;
}
int B = oshape[0];
int C = oshape[1];
int Ho = oshape[2];
int Wo = oshape[3];
vector<int> dimA{B, C, Ho, Wo};
NBLA_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(
spatial_tf_desc_, CUDNN_SAMPLER_BILINEAR, cudnn_data_type<T>::type(), 4,
dimA.data()));
auto ishape = inputs[0]->shape();
int Hi = ishape[2];
int Wi = ishape[3];
vector<int> dimX{B, C, Hi, Wi};
cudnn_set_tensor_nd_descriptor_force_dim(x_desc_, cudnn_data_type<T>::type(),
dimX, dimX.size(),
this->channel_last_, false);
vector<int> dimY{B, C, Ho, Wo};
cudnn_set_tensor_nd_descriptor_force_dim(y_desc_, cudnn_data_type<T>::type(),
dimY, dimY.size(),
this->channel_last_, false);
}
template <typename T>
void WarpByGridCudaCudnn<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
WarpByGridCuda<T>::forward_impl(inputs, outputs);
return;
}
cudnnHandle_t cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
auto x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
auto y = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerForward(cudnn_handle, spatial_tf_desc_,
&alpha, x_desc_, x, grid, &beta,
y_desc_, y));
}
template <typename T>
void WarpByGridCudaCudnn<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0] || propagate_down[1])) {
return;
}
cuda_set_device(this->device_);
auto oshape = outputs[0]->shape();
#if defined(_WIN32) || defined(_WIN64)
WarpByGridCuda<T>::backward_impl(inputs, outputs, propagate_down, accum);
return;
#endif
if (!warp_by_grid::cudnn_condition(
outputs[0]->shape().size(), this->mode_, this->padding_mode_t_,
this->align_corners_, this->channel_last_)) {
WarpByGridCuda<T>::backward_impl(inputs, outputs, propagate_down, accum);
return;
}
cudnnHandle_t cudnn_handle =
SingletonManager::get<CudnnHandleManager>()->handle(this->device_);
const auto x = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
const auto grid = inputs[1]->get_data_pointer<Tcu>(this->ctx_);
const auto y = outputs[0]->get_data_pointer<Tcu>(this->ctx_);
const auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
auto alpha = get_cudnn_scalar_arg<T>(1);
auto beta = get_cudnn_scalar_arg<T>(0);
auto g_alpha = get_cudnn_scalar_arg<T>(1);
auto g_beta = get_cudnn_scalar_arg<T>(0);
auto x_size = inputs[0]->size();
auto g_size = inputs[1]->size();
  // Passing beta = 1 to accumulate directly in cuDNN does not seem to work.
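  // When accumulation is requested (accum[i] == true), the gradient is instead
  // written to a temporary variable and then added onto the existing gradient
  // with kernel_warp_by_grid_add.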
if (propagate_down[0] && propagate_down[1]) {
if (!accum[0] && !accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
} else if (!accum[0] && accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
} else if (accum[0] && !accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
} else if (accum[0] && accum[1]) {
auto g_x =
inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
}
} else if (propagate_down[0] && !propagate_down[1]) {
auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[0]);
auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
if (!accum[0]) {
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid_tmp_ptr0));
} else {
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_x_tmp_ptr1 = g_x_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, x_size, g_x,
g_x_tmp_ptr1);
}
} else if (!propagate_down[0] && propagate_down[1]) {
auto g_x_tmp = make_shared<Variable>(inputs[0]->shape());
auto g_x_tmp_ptr0 =
g_x_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid =
inputs[1]->cast_grad_and_get_pointer<Tcu>(this->ctx_, !accum[1]);
if (!accum[1]) {
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta, g_grid));
} else {
      auto g_grid_tmp = make_shared<Variable>(inputs[1]->shape());
auto g_grid_tmp_ptr0 =
g_grid_tmp->cast_grad_and_get_pointer<Tcu>(this->ctx_, true);
auto g_grid_tmp_ptr1 = g_grid_tmp->get_grad_pointer<Tcu>(this->ctx_);
NBLA_CUDNN_CHECK(cudnnSpatialTfSamplerBackward(
cudnn_handle, spatial_tf_desc_, &alpha, x_desc_, x, &beta, x_desc_,
g_x_tmp_ptr0, &g_alpha, y_desc_, g_y, grid, &g_beta,
g_grid_tmp_ptr0));
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_warp_by_grid_add, g_size, g_grid,
g_grid_tmp_ptr1);
}
}
}
} // namespace nbla
|
57a5873af4611528da9ddaff899747e1d012c784.hip | // !!! This is a file automatically generated by hipify!!!
/* Using cuSPARSE for matrix-vector multiplication of the block_match technique. */
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <time.h>
#include "utils.h"
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 1;
double norm = 0;
unsigned bin_width = 10;
hipsparseHandle_t handle = 0;
hipsparseMatDescr_t descr = 0;
h_vec_t<unsigned> h_distance_1;
unsigned num_feat_1 = atoi(argv[2]);
ReadMatrix(h_distance_1, argv[1], num_feat_1);
#ifdef ACCELERATE
std::cout << "CUDA" << std::endl;
  d_vec_t<unsigned> d_distance_1 = h_distance_1;
#endif
h_vec_t<double> h_distance_2;
unsigned num_feat_2 = atoi(argv[4]);
ReadMatrix(h_distance_2, argv[3], num_feat_2);
#ifdef ACCELERATE
  d_vec_t<double> d_distance_2 = h_distance_2;
#endif
unsigned num_iters = 20;
if (8 == argc)
num_iters = atoi(argv[7]);
/**************************************************
* find unique values of distance1 and their indices
***************************************************/
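  // Every distinct value in distance_1 becomes a key; keys smaller than
  // bin_width are dropped, and for each remaining key the indices of its
  // occurrences in distance_1 are collected into keys_idcs below.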
#ifdef ACCELERATE
d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1);
d_uniq_keys.erase(
remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), IsLessThan(bin_width)),
d_uniq_keys.end());
#else
//std::cout << "HOST" << std::endl;
h_vec_t<unsigned> h_uniq_keys = FindUniques(h_distance_1);
h_uniq_keys.erase(
remove_if(h_uniq_keys.begin(), h_uniq_keys.end(), IsLessThan(bin_width)),
h_uniq_keys.end());
#endif
#ifdef ACCELERATE
d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()];
for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
d_keys_idcs[i].resize(d_distance_1.size());
}
#else
h_vec_t<int> *h_keys_idcs = new h_vec_t<int>[h_uniq_keys.size()];
for (unsigned i = 0; i < h_uniq_keys.size(); ++i) {
h_keys_idcs[i].resize(h_distance_1.size());
}
#endif
counting_iterator<unsigned> first_idx(0);
counting_iterator<unsigned> last_idx = first_idx + num_feat_1;
#ifdef ACCELERATE
for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
transform(ZIP2(d_distance_1.begin(), first_idx),
ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(),
IsEqual(d_uniq_keys[i]));
d_keys_idcs[i].erase(
remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1),
d_keys_idcs[i].end());
}
#else
for (unsigned i = 0; i < h_uniq_keys.size(); ++i) {
transform(ZIP2(h_distance_1.begin(), first_idx),
ZIP2(h_distance_1.end(), last_idx), h_keys_idcs[i].begin(),
IsEqual(h_uniq_keys[i]));
h_keys_idcs[i].erase(
remove(h_keys_idcs[i].begin(), h_keys_idcs[i].end(), -1),
h_keys_idcs[i].end());
}
#endif
/***************************************************
* construct CSR sparse affinity blocks *
***************************************************/
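  // For each unique key a num_feat_2 x num_feat_2 affinity matrix is built and
  // compressed to CSR; the values/columns of all blocks are appended back to
  // back, with csr_blocked_len recording the running offsets into csr_val / csr_col.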
unsigned len_affinity_block = num_feat_2 * num_feat_2;
#ifdef ACCELERATE
d_vec_t<double> d_affinity_blocks(d_uniq_keys.size() * len_affinity_block);
#else
h_vec_t<double> h_affinity_blocks(h_uniq_keys.size() * len_affinity_block);
#endif
#ifdef ACCELERATE
d_vec_t<double> csr_val;
d_vec_t<int> csr_col;
d_vec_t<int> csr_row;
d_vec_t<int> csr_blocked_len;
for (int i = 0; i < d_uniq_keys.size(); ++i) {
transform(d_distance_2.begin(), d_distance_2.end(),
d_affinity_blocks.begin() + i * len_affinity_block,
Affinity(d_uniq_keys[i]));
CompressMatrix(csr_val, csr_col, csr_row,
raw_pointer_cast(d_affinity_blocks.begin()) +
i * len_affinity_block,
num_feat_2, num_feat_2);
csr_blocked_len.push_back(csr_val.size());
}
#else
h_vec_t<double> csr_val;
h_vec_t<int> csr_col;
h_vec_t<int> csr_row;
h_vec_t<int> csr_blocked_len;
csr_blocked_len.push_back(0);
const clock_t begin_time = clock();
for (int i = 0; i < h_uniq_keys.size(); ++i) {
transform(h_distance_2.begin(), h_distance_2.end(),
h_affinity_blocks.begin() + i * len_affinity_block,
Affinity(h_uniq_keys[i]));
CompressMatrix(csr_val, csr_col, csr_row,
raw_pointer_cast(h_affinity_blocks.data()) +
i * len_affinity_block,
num_feat_2, num_feat_2);
csr_blocked_len.push_back(csr_val.size());
}
// std::cout << "val size: " << csr_val.size() << std::endl;
d_vec_t<double> d_csr_val = csr_val;
d_vec_t<int> d_csr_col = csr_col;
d_vec_t<int> d_csr_row = csr_row;
#endif
std::cout << "affinity runtime: "
<< (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
// std::cout << "affinity" << std::endl;
// std::cout << "values"
// << " "
// << "columns" << std::endl;
// for (int i = 0; i < h_uniq_keys.size(); ++i) {
// //std::cout << "unq keys:" << h_uniq_keys[i] << std::endl;
// for (int j = csr_blocked_len[i]; j < csr_blocked_len[i + 1]; ++j) {
// std::cout << csr_val[j] << " " << csr_col[j] << std::endl;
// }
// std::cout << std::endl;
// }
// std::cout << std::endl;
// std::cout << "csr rows" << std::endl;
// for (int i = 0; i < h_uniq_keys.size(); ++i) {
// std::cout << "unq keys:" << h_uniq_keys[i] << std::endl;
// for (int j = 0; j < num_feat_2 + 1; ++j) {
// std::cout << csr_row[j + i * (num_feat_2)] << std::endl;
// }
// }
// std::cout << std::endl;
/******************************************************
* initialize eigen vectors *
******************************************************/
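  // The eigenvector has one entry per (feature_1, feature_2) pair and is
  // initialized to 1 / sqrt(len_eigen_vec) so that it starts with unit L2 norm.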
unsigned len_eigen_vec = num_feat_1 * num_feat_2;
d_vec_t<double> eigen_vec_new(len_eigen_vec);
d_vec_t<double> eigen_vec_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(eigen_vec_old.begin(), eigen_vec_old.end(), norm);
#ifdef ACCELERATE
int num_keys = d_uniq_keys.size();
#else
int num_keys = h_uniq_keys.size();
#endif
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
/*******************************************************
* compute eigen values *
********************************************************/
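  // Power iteration: for every matching (row, col) index pair, multiply the
  // corresponding CSR affinity block with a slice of the old eigenvector and
  // accumulate into the new one, then renormalize by the L2 norm; the vector
  // converges towards the leading eigenvector of the blocked affinity matrix.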
const clock_t begin_time2 = clock();
for (int iter = 0; iter < num_iters; ++iter) {
// Create a stream for each operation
hipStream_t *streams =
(hipStream_t *)malloc(num_keys * sizeof(hipStream_t));
for (int i = 0; i < num_keys; i++)
hipStreamCreate(&streams[i]);
for (int i = 0; i < num_keys; i++) {
hipsparseSetStream(handle, streams[i]);
#ifdef ACCELERATE
for (int j = 0; j < d_keys_idcs[i].size(); j++) {
int row = d_keys_idcs[i][j] / num_feat_1;
int col = d_keys_idcs[i][j] % num_feat_1;
#else
for (int j = 0; j < h_keys_idcs[i].size(); j++) {
int row = h_keys_idcs[i][j] / num_feat_1;
int col = h_keys_idcs[i][j] % num_feat_1;
#endif
int csr_size = csr_blocked_len[i + 1] - csr_blocked_len[i];
hipsparseDcsrmv(
handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2,
csr_size, &alpha, descr,
raw_pointer_cast(d_csr_val.data() + csr_blocked_len[i]),
raw_pointer_cast(d_csr_row.data() + i * (num_feat_2 + 1)),
raw_pointer_cast(d_csr_col.data() + csr_blocked_len[i]),
raw_pointer_cast(eigen_vec_old.data()) + col * num_feat_2, &beta,
raw_pointer_cast(eigen_vec_new.data()) + row * num_feat_2);
}
}
double init = 0;
norm =
std::sqrt(transform_reduce(eigen_vec_new.begin(), eigen_vec_new.end(),
square(), init, thrust::plus<double>()));
transform(eigen_vec_new.begin(), eigen_vec_new.end(), eigen_vec_old.begin(),
division(norm));
fill(eigen_vec_new.begin(), eigen_vec_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
//std::cout << "eigen values" << std::endl;
//for (int i = 0; i < eigen_vec_old.size(); i++) {
// std::cout << "eigen new value = " << eigen_vec_new[i] << "";
// std::cout << "eigen old value = " << eigen_vec_old[i] << std::endl;
//}
hipsparseDestroy(handle);
return 0;
}
| 57a5873af4611528da9ddaff899747e1d012c784.cu | /* Using cuSPARSE for matrix vector multplication of block_match technique. */
#include <algorithm>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <time.h>
#include "utils.h"
int main(int argc, char *argv[]) {
/***********************************************
* initialize program's input parameters *
***********************************************/
double alpha = 1;
double beta = 1;
double norm = 0;
unsigned bin_width = 10;
cusparseHandle_t handle = 0;
cusparseMatDescr_t descr = 0;
h_vec_t<unsigned> h_distance_1;
unsigned num_feat_1 = atoi(argv[2]);
ReadMatrix(h_distance_1, argv[1], num_feat_1);
#ifdef ACCELERATE
std::cout << "CUDA" << std::endl;
  d_vec_t<unsigned> d_distance_1 = h_distance_1;
#endif
h_vec_t<double> h_distance_2;
unsigned num_feat_2 = atoi(argv[4]);
ReadMatrix(h_distance_2, argv[3], num_feat_2);
#ifdef ACCELERATE
  d_vec_t<double> d_distance_2 = h_distance_2;
#endif
unsigned num_iters = 20;
if (8 == argc)
num_iters = atoi(argv[7]);
/**************************************************
* find unique values of distance1 and their indices
***************************************************/
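  // Every distinct value in distance_1 becomes a key; keys smaller than
  // bin_width are dropped, and for each remaining key the indices of its
  // occurrences in distance_1 are collected into keys_idcs below.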
#ifdef ACCELERATE
d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1);
d_uniq_keys.erase(
remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), IsLessThan(bin_width)),
d_uniq_keys.end());
#else
//std::cout << "HOST" << std::endl;
h_vec_t<unsigned> h_uniq_keys = FindUniques(h_distance_1);
h_uniq_keys.erase(
remove_if(h_uniq_keys.begin(), h_uniq_keys.end(), IsLessThan(bin_width)),
h_uniq_keys.end());
#endif
#ifdef ACCELERATE
d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()];
for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
d_keys_idcs[i].resize(d_distance_1.size());
}
#else
h_vec_t<int> *h_keys_idcs = new h_vec_t<int>[h_uniq_keys.size()];
for (unsigned i = 0; i < h_uniq_keys.size(); ++i) {
h_keys_idcs[i].resize(h_distance_1.size());
}
#endif
counting_iterator<unsigned> first_idx(0);
counting_iterator<unsigned> last_idx = first_idx + num_feat_1;
#ifdef ACCELERATE
for (unsigned i = 0; i < d_uniq_keys.size(); ++i) {
transform(ZIP2(d_distance_1.begin(), first_idx),
ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(),
IsEqual(d_uniq_keys[i]));
d_keys_idcs[i].erase(
remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1),
d_keys_idcs[i].end());
}
#else
for (unsigned i = 0; i < h_uniq_keys.size(); ++i) {
transform(ZIP2(h_distance_1.begin(), first_idx),
ZIP2(h_distance_1.end(), last_idx), h_keys_idcs[i].begin(),
IsEqual(h_uniq_keys[i]));
h_keys_idcs[i].erase(
remove(h_keys_idcs[i].begin(), h_keys_idcs[i].end(), -1),
h_keys_idcs[i].end());
}
#endif
/***************************************************
* construct CSR sparse affinity blocks *
***************************************************/
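  // For each unique key a num_feat_2 x num_feat_2 affinity matrix is built and
  // compressed to CSR; the values/columns of all blocks are appended back to
  // back, with csr_blocked_len recording the running offsets into csr_val / csr_col.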
unsigned len_affinity_block = num_feat_2 * num_feat_2;
#ifdef ACCELERATE
d_vec_t<double> d_affinity_blocks(d_uniq_keys.size() * len_affinity_block);
#else
h_vec_t<double> h_affinity_blocks(h_uniq_keys.size() * len_affinity_block);
#endif
#ifdef ACCELERATE
d_vec_t<double> csr_val;
d_vec_t<int> csr_col;
d_vec_t<int> csr_row;
d_vec_t<int> csr_blocked_len;
for (int i = 0; i < d_uniq_keys.size(); ++i) {
transform(d_distance_2.begin(), d_distance_2.end(),
d_affinity_blocks.begin() + i * len_affinity_block,
Affinity(d_uniq_keys[i]));
CompressMatrix(csr_val, csr_col, csr_row,
raw_pointer_cast(d_affinity_blocks.begin()) +
i * len_affinity_block,
num_feat_2, num_feat_2);
csr_blocked_len.push_back(csr_val.size());
}
#else
h_vec_t<double> csr_val;
h_vec_t<int> csr_col;
h_vec_t<int> csr_row;
h_vec_t<int> csr_blocked_len;
csr_blocked_len.push_back(0);
const clock_t begin_time = clock();
for (int i = 0; i < h_uniq_keys.size(); ++i) {
transform(h_distance_2.begin(), h_distance_2.end(),
h_affinity_blocks.begin() + i * len_affinity_block,
Affinity(h_uniq_keys[i]));
CompressMatrix(csr_val, csr_col, csr_row,
raw_pointer_cast(h_affinity_blocks.data()) +
i * len_affinity_block,
num_feat_2, num_feat_2);
csr_blocked_len.push_back(csr_val.size());
}
// std::cout << "val size: " << csr_val.size() << std::endl;
d_vec_t<double> d_csr_val = csr_val;
d_vec_t<int> d_csr_col = csr_col;
d_vec_t<int> d_csr_row = csr_row;
#endif
std::cout << "affinity runtime: "
<< (clock() - begin_time) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
// std::cout << "affinity" << std::endl;
// std::cout << "values"
// << " "
// << "columns" << std::endl;
// for (int i = 0; i < h_uniq_keys.size(); ++i) {
// //std::cout << "unq keys:" << h_uniq_keys[i] << std::endl;
// for (int j = csr_blocked_len[i]; j < csr_blocked_len[i + 1]; ++j) {
// std::cout << csr_val[j] << " " << csr_col[j] << std::endl;
// }
// std::cout << std::endl;
// }
// std::cout << std::endl;
// std::cout << "csr rows" << std::endl;
// for (int i = 0; i < h_uniq_keys.size(); ++i) {
// std::cout << "unq keys:" << h_uniq_keys[i] << std::endl;
// for (int j = 0; j < num_feat_2 + 1; ++j) {
// std::cout << csr_row[j + i * (num_feat_2)] << std::endl;
// }
// }
// std::cout << std::endl;
/******************************************************
* initialize eigen vectors *
******************************************************/
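  // The eigenvector has one entry per (feature_1, feature_2) pair and is
  // initialized to 1 / sqrt(len_eigen_vec) so that it starts with unit L2 norm.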
unsigned len_eigen_vec = num_feat_1 * num_feat_2;
d_vec_t<double> eigen_vec_new(len_eigen_vec);
d_vec_t<double> eigen_vec_old(len_eigen_vec);
norm = 1.0 / sqrt(len_eigen_vec);
fill(eigen_vec_old.begin(), eigen_vec_old.end(), norm);
#ifdef ACCELERATE
int num_keys = d_uniq_keys.size();
#else
int num_keys = h_uniq_keys.size();
#endif
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
/*******************************************************
* compute eigen values *
********************************************************/
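  // Power iteration: for every matching (row, col) index pair, multiply the
  // corresponding CSR affinity block with a slice of the old eigenvector and
  // accumulate into the new one, then renormalize by the L2 norm; the vector
  // converges towards the leading eigenvector of the blocked affinity matrix.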
const clock_t begin_time2 = clock();
for (int iter = 0; iter < num_iters; ++iter) {
// Create a stream for each operation
cudaStream_t *streams =
(cudaStream_t *)malloc(num_keys * sizeof(cudaStream_t));
for (int i = 0; i < num_keys; i++)
cudaStreamCreate(&streams[i]);
for (int i = 0; i < num_keys; i++) {
cusparseSetStream(handle, streams[i]);
#ifdef ACCELERATE
for (int j = 0; j < d_keys_idcs[i].size(); j++) {
int row = d_keys_idcs[i][j] / num_feat_1;
int col = d_keys_idcs[i][j] % num_feat_1;
#else
for (int j = 0; j < h_keys_idcs[i].size(); j++) {
int row = h_keys_idcs[i][j] / num_feat_1;
int col = h_keys_idcs[i][j] % num_feat_1;
#endif
int csr_size = csr_blocked_len[i + 1] - csr_blocked_len[i];
cusparseDcsrmv(
handle, CUSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2,
csr_size, &alpha, descr,
raw_pointer_cast(d_csr_val.data() + csr_blocked_len[i]),
raw_pointer_cast(d_csr_row.data() + i * (num_feat_2 + 1)),
raw_pointer_cast(d_csr_col.data() + csr_blocked_len[i]),
raw_pointer_cast(eigen_vec_old.data()) + col * num_feat_2, &beta,
raw_pointer_cast(eigen_vec_new.data()) + row * num_feat_2);
}
}
double init = 0;
norm =
std::sqrt(transform_reduce(eigen_vec_new.begin(), eigen_vec_new.end(),
square(), init, thrust::plus<double>()));
transform(eigen_vec_new.begin(), eigen_vec_new.end(), eigen_vec_old.begin(),
division(norm));
fill(eigen_vec_new.begin(), eigen_vec_new.end(), 0);
}
std::cout << "Eigen runtime: "
<< (clock() - begin_time2) / double(CLOCKS_PER_SEC) * 1000 << std::endl;
//std::cout << "eigen values" << std::endl;
//for (int i = 0; i < eigen_vec_old.size(); i++) {
// std::cout << "eigen new value = " << eigen_vec_new[i] << "";
// std::cout << "eigen old value = " << eigen_vec_old[i] << std::endl;
//}
cusparseDestroy(handle);
return 0;
}
|
942436fd3ebfb61c04268e836008de1a69bd3bd2.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
#endif
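// Allocates `size` elements of `type` in pinned host memory and binds the
// buffer to `name`; the pointer/pivot/info arrays below are assembled in these
// buffers before being handed to the batched MAGMA routines.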
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves.
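    // e.g. with batch_size = 100000: one full mini-batch of 65535 solves runs here,
    // and the remaining 100000 % 65535 = 34465 matrices are handled by the tail
    // call below.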
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 as possible
  // The number of "mini"-batches is floor(batch_size / batch_limit),
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 as possible
    // The number of "mini"-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
    // Compute as many batches of 262140 as possible
    // 262140 is the size of the largest batch of matrices that can be run without
    // violating the maximum kernel configuration.
    // The number of "mini"-batches is floor(batch_size / batch_limit),
    // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
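// The pivots are pre-filled with 1..k, i.e. the identity permutation in
// LAPACK's 1-based convention, so that a valid result is returned when
// pivoting is skipped (pivot == false) or when the input is empty.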
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
if (self.dim() > 2 && pivot && m == n && m <= 32) {
/*
The MAGMA implementation for small singular square batch
matrices has a bug that produces NaN values in the LU
factorization results, see
https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
TODO: disable this block for magma versions that implement a bug fix
*/
auto batch_size = infos_tensor.numel();
auto infos_array = infos_tensor.view({batch_size});
auto infos_cpu = infos_array.to(at::kCPU);
auto infos_data = infos_cpu.data_ptr<int>();
auto input_array = self.view({batch_size, m, n});
auto working_array = self_working_copy.view({batch_size, m, n});
auto pivots_array = pivots_tensor.view({batch_size, k});
for (int64_t i = 0; i < batch_size; i++) {
auto info = infos_data[i];
if (info > 0) {
/*
We'll recompute LU factorization of singular matrices
using the non-batch implementation to workaround the
magma bug (magma issue 13).
*/
working_array[i].copy_(input_array[i]);
auto matrix = working_array[i];
auto pivots = pivots_array[i];
auto infos = infos_array[i];
apply_lu<scalar_t>(matrix, pivots, infos, pivot);
}
}
}
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
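// The dT workspace size below, (2 * min(m, n) + roundup(n, 32)) * nb, appears
// to match what MAGMA's documentation requires for ?geqrf_gpu; nb is the
// optimal block size reported by magmaGeqrfOptimalBlocksize.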
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of r_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<scalar_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
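// Passing lwork = -1 and liwork = -1 is a LAPACK-style workspace query: the
// routine performs no computation and only reports the optimal sizes in
// wkopt and iwkopt.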
magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(wkopt, "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options())
: at::empty(self_sizes, self.options().device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<scalar_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = std::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
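// ?gesdd requires an integer workspace of 8 * min(m, n); the call below with
// lwork = -1 is a LAPACK-style workspace query that returns the optimal
// floating-point workspace size in wkopt.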
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
scalar_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
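// jobchar follows the LAPACK ?gesdd convention: 'A' computes full U and VT,
// 'S' computes only the leading min(m, n) columns of U / rows of VT, and
// 'N' skips the singular vectors entirely.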
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of part of these requirements (for U, S and VT).
// For the input matrix, these requirements are taken care of below.
// Specify column-major (Fortran-order) strides: stride 1 along rows and m along columns, as MAGMA expects
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
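// The single-matrix driver (?getrs_gpu) expects the pivot array on the host,
// hence the .cpu() copy; the batched variant below takes device pivots.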
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 942436fd3ebfb61c04268e836008de1a69bd3bd2.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
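// ALLOCATE_ARRAY obtains pinned (page-locked) host memory via pin_memory<type>()
// and keeps it alive through the local storage_##name object for the enclosing
// scope; `name` is simply the raw pointer into that storage.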
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data_ptr<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
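// 65535 presumably mirrors the maximum CUDA grid y/z-dimension, which MAGMA's
// batched kernels use to index the batch.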
// Compute as many batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
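// The LU factors and pivots computed above are consumed by
// ?getri_outofplace_batched below to produce the inverses in self_inv_array.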
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
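// ?getri forms the inverse from the LU factors and pivots produced by the
// ?getrf call above, using dwork as scratch space.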
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
constexpr int64_t batch_limit = 262140;
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run
// without violating the maximum kernel configuration
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_int_t k = std::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accept a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = std::min(m, n);
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
if (self.dim() > 2 && pivot && m == n && m <= 32) {
/*
The MAGMA implementation for small singular square batch
matrices has a bug that produces NaN values in the LU
factorization results, see
https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
TODO: disable this block for magma versions that implement a bug fix
*/
auto batch_size = infos_tensor.numel();
auto infos_array = infos_tensor.view({batch_size});
auto infos_cpu = infos_array.to(at::kCPU);
auto infos_data = infos_cpu.data_ptr<int>();
auto input_array = self.view({batch_size, m, n});
auto working_array = self_working_copy.view({batch_size, m, n});
auto pivots_array = pivots_tensor.view({batch_size, k});
for (int64_t i = 0; i < batch_size; i++) {
auto info = infos_data[i];
if (info > 0) {
/*
We'll recompute LU factorization of singular matrices
using the non-batch implementation to workaround the
magma bug (magma issue 13).
*/
working_array[i].copy_(input_array[i]);
auto matrix = working_array[i];
auto pivots = pivots_array[i];
auto infos = infos_array[i];
apply_lu<scalar_t>(matrix, pivots, infos, pivot);
}
}
}
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
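// MAGMA's batched routines can handle at most 65535 matrices per call (a CUDA grid-dimension limit),
// so larger batches are processed in mini-batches of batch_limit below.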
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<scalar_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(wkopt, "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for the eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the required dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options())
: at::empty(self_sizes, self.options().device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<scalar_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = std::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
scalar_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
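// MAGMA's batched routines can handle at most 65535 matrices per call (a CUDA grid-dimension limit),
// so larger batches are processed in mini-batches of batch_limit below.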
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
if (self.numel() == 0 || LU_data.numel() == 0) {
return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
});
TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
a16d56df159d6ff5f9985141dd0b9ae46a8b48f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reader_writer.h"
#include "cpu.h"
#include "gpu.h"
#include "helper.h"
int main(int argc, const char **argv) {
if (argc != 2) {
std::cout << "Usage: ./md_xxx [parameter file]" << std::endl;
exit(EXIT_FAILURE);
}
std::vector<Particle> particles;
Params params;
// Read parameter file and retrieve data
ParameterReader params_reader;
params_reader.read(std::string(argv[1]));
params = params_reader.get();
// Read input data and set num_part
ParticleReader part_reader;
params.num_part = part_reader.read(params);
particles = part_reader.get();
//TODO assert particles.length = nr_part
// Create OutputWriter
OutputWriter output_writer;
// Init linked list for cell and particle parallel approach
std::vector<int> linked_particles(params.num_part);
std::vector<int> linked_cells(params.cells0 * params.cells1 * params.cells2, -1);
// Data on device
#if defined(GPU)
Particle *d_particles;
Params *d_params;
int *d_linked_cells;
int *d_linked_particles;
const long long nBytes = sizeof(Particle) * (params.num_part);
checkError(hipMalloc(&d_particles, nBytes), "malloc particles");
checkError(hipMalloc(&d_params, sizeof(Params)), "malloc params");
checkError(hipMalloc(&d_linked_cells, sizeof(int) * params.cells0 * params.cells1 * params.cells2), "malloc linked cells");
checkError(hipMalloc(&d_linked_particles, sizeof(int) * params.num_part), "malloc linked particles");
checkError(hipMemcpy(d_particles, &particles[0], nBytes, hipMemcpyHostToDevice), "memcpy host to device part");
checkError(hipMemcpy(d_params, &params, sizeof(Params), hipMemcpyHostToDevice), "memcpy host to device params");
//TODO why?
checkError(hipMemcpy(d_linked_cells, &linked_cells[0], sizeof(int) * params.cells0 * params.cells1 * params.cells2, hipMemcpyHostToDevice),
"memcpy host to device cells");
//TODO why?
checkError(hipMemcpy(d_linked_particles, &linked_particles[0], sizeof(int) * params.num_part, hipMemcpyHostToDevice),
"memcpy host to device linked particles");
const dim3 threadsPerBlock(params.block_size);
const dim3 numBlocks(params.num_part / params.block_size + 1);
const dim3 numBlocksCells((params.cells0 * params.cells1 * params.cells2) / params.block_size + 1);
#endif
// Variables for measurement
double total_time = 0.;
double start_time, end_time;
// Variables for iteration
double time = 0.;
size_t iter = 0, iter_v = 0;
#if defined(GPU)
hipLaunchKernelGGL(( update_list), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles, d_linked_cells, d_linked_particles);
//checkError(hipPeekAtLastError(), "peek error");
//checkError(hipDeviceSynchronize(), "");
#else
update_list(params, particles, linked_cells, linked_particles);
#endif
// Initial force calc.
#if defined(GPU)
hipLaunchKernelGGL(( calc_force), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles, d_linked_cells, d_linked_particles);
//checkError(hipDeviceSynchronize(), "");
#else
calc_force(params, particles, linked_cells, linked_particles);
#endif
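// Main time-stepping loop: periodically write VTK output, then advance one step by
// updating positions, rebuilding the linked-cell lists, recomputing forces, and updating velocities.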
while (time <= params.time_end) {
if (iter % params.vtk_out_freq == 0) {
#if defined(GPU)
checkError(hipMemcpy(&particles[0], d_particles, nBytes, hipMemcpyDeviceToHost), "memcpy device to host vtk");
#endif
output_writer.write_vtk(particles, params, iter_v);
++iter_v;
}
start_time = getSeconds();
#if defined(GPU)
hipLaunchKernelGGL(( update_pos), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles);
#else
update_pos(params, particles);
#endif
#if defined(GPU)
//checkError(hipMemset(d_linked_cells, -1, sizeof(int) * params.cells0 * params.cells1 * params.cells2), "memset");
//TODO numblocks etc
hipLaunchKernelGGL(( set_list), dim3(numBlocksCells), dim3(threadsPerBlock), 0, 0, d_linked_cells, params.cells0 * params.cells1 * params.cells2, -1);
hipLaunchKernelGGL(( update_list), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles, d_linked_cells, d_linked_particles);
hipLaunchKernelGGL(( calc_force), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles, d_linked_cells, d_linked_particles);
#else
linked_cells.assign(linked_cells.size(), -1);
update_list(params, particles, linked_cells, linked_particles);
calc_force(params, particles, linked_cells, linked_particles);
#endif
#if defined(GPU)
hipLaunchKernelGGL(( calc_velocity), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_params, d_particles);
#else
calc_velocity(params, particles);
#endif
checkError(hipDeviceSynchronize(), "sync");
end_time = getSeconds();
total_time += end_time - start_time;
if (iter % 100 == 0 && iter != 0) std::cout << "time/iter: " << total_time/iter << std::endl;
if (iter % 100 == 0 && iter != 0) std::cout << "last iter: " << end_time - start_time << std::endl;
time += params.timestep_length;
++iter;
}
// write last vtk file
if (iter % params.vtk_out_freq == 0) {
#if defined(GPU)
checkError(hipMemcpy(&particles[0], d_particles, nBytes, hipMemcpyDeviceToHost), "memcpy device to host vtk");
#endif
output_writer.write_vtk(particles, params, iter_v);
}
std::cout << total_time << std::endl;
#if defined(GPU)
checkError(hipFree(d_params), "free");
checkError(hipFree(d_particles), "free");
checkError(hipFree(d_linked_cells), "free");
checkError(hipFree(d_linked_particles), "free");
#endif
exit(EXIT_SUCCESS);
}
| a16d56df159d6ff5f9985141dd0b9ae46a8b48f3.cu | #include "reader_writer.h"
#include "cpu.h"
#include "gpu.h"
#include "helper.h"
int main(int argc, const char **argv) {
if (argc != 2) {
std::cout << "Usage: ./md_xxx [parameter file]" << std::endl;
exit(EXIT_FAILURE);
}
std::vector<Particle> particles;
Params params;
// Read parameter file and retrieve data
ParameterReader params_reader;
params_reader.read(std::string(argv[1]));
params = params_reader.get();
// Read input data and set num_part
ParticleReader part_reader;
params.num_part = part_reader.read(params);
particles = part_reader.get();
//TODO assert particles.length = nr_part
// Create OutputWriter
OutputWriter output_writer;
// Init linked list for cell and particle parallel approach
std::vector<int> linked_particles(params.num_part);
std::vector<int> linked_cells(params.cells0 * params.cells1 * params.cells2, -1);
// Data on device
#if defined(GPU)
Particle *d_particles;
Params *d_params;
int *d_linked_cells;
int *d_linked_particles;
const long long nBytes = sizeof(Particle) * (params.num_part);
checkError(cudaMalloc(&d_particles, nBytes), "malloc particles");
checkError(cudaMalloc(&d_params, sizeof(Params)), "malloc params");
checkError(cudaMalloc(&d_linked_cells, sizeof(int) * params.cells0 * params.cells1 * params.cells2), "malloc linked cells");
checkError(cudaMalloc(&d_linked_particles, sizeof(int) * params.num_part), "malloc linked particles");
checkError(cudaMemcpy(d_particles, &particles[0], nBytes, cudaMemcpyHostToDevice), "memcpy host to device part");
checkError(cudaMemcpy(d_params, &params, sizeof(Params), cudaMemcpyHostToDevice), "memcpy host to device params");
//TODO why?
checkError(cudaMemcpy(d_linked_cells, &linked_cells[0], sizeof(int) * params.cells0 * params.cells1 * params.cells2, cudaMemcpyHostToDevice),
"memcpy host to device cells");
//TODO why?
checkError(cudaMemcpy(d_linked_particles, &linked_particles[0], sizeof(int) * params.num_part, cudaMemcpyHostToDevice),
"memcpy host to device linked particles");
const dim3 threadsPerBlock(params.block_size);
const dim3 numBlocks(params.num_part / params.block_size + 1);
const dim3 numBlocksCells((params.cells0 * params.cells1 * params.cells2) / params.block_size + 1);
#endif
// Variables for measurement
double total_time = 0.;
double start_time, end_time;
// Variables for iteration
double time = 0.;
size_t iter = 0, iter_v = 0;
#if defined(GPU)
update_list<<<numBlocks, threadsPerBlock>>>(d_params, d_particles, d_linked_cells, d_linked_particles);
//checkError(cudaPeekAtLastError(), "peek error");
//checkError(cudaDeviceSynchronize(), "");
#else
update_list(params, particles, linked_cells, linked_particles);
#endif
// Initial force calc.
#if defined(GPU)
calc_force<<<numBlocks, threadsPerBlock>>>(d_params, d_particles, d_linked_cells, d_linked_particles);
//checkError(cudaDeviceSynchronize(), "");
#else
calc_force(params, particles, linked_cells, linked_particles);
#endif
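// Main time-stepping loop: periodically write VTK output, then advance one step by
// updating positions, rebuilding the linked-cell lists, recomputing forces, and updating velocities.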
while (time <= params.time_end) {
if (iter % params.vtk_out_freq == 0) {
#if defined(GPU)
checkError(cudaMemcpy(&particles[0], d_particles, nBytes, cudaMemcpyDeviceToHost), "memcpy device to host vtk");
#endif
output_writer.write_vtk(particles, params, iter_v);
++iter_v;
}
start_time = getSeconds();
#if defined(GPU)
update_pos<<<numBlocks, threadsPerBlock>>>(d_params, d_particles);
#else
update_pos(params, particles);
#endif
#if defined(GPU)
//checkError(cudaMemset(d_linked_cells, -1, sizeof(int) * params.cells0 * params.cells1 * params.cells2), "memset");
//TODO numblocks etc
set_list<<<numBlocksCells, threadsPerBlock>>>(d_linked_cells, params.cells0 * params.cells1 * params.cells2, -1);
update_list<<<numBlocks, threadsPerBlock>>>(d_params, d_particles, d_linked_cells, d_linked_particles);
calc_force<<<numBlocks, threadsPerBlock>>>(d_params, d_particles, d_linked_cells, d_linked_particles);
#else
linked_cells.assign(linked_cells.size(), -1);
update_list(params, particles, linked_cells, linked_particles);
calc_force(params, particles, linked_cells, linked_particles);
#endif
#if defined(GPU)
calc_velocity<<<numBlocks, threadsPerBlock>>>(d_params, d_particles);
#else
calc_velocity(params, particles);
#endif
checkError(cudaDeviceSynchronize(), "sync");
end_time = getSeconds();
total_time += end_time - start_time;
if (iter % 100 == 0 && iter != 0) std::cout << "time/iter: " << total_time/iter << std::endl;
if (iter % 100 == 0 && iter != 0) std::cout << "last iter: " << end_time - start_time << std::endl;
time += params.timestep_length;
++iter;
}
// write last vtk file
if (iter % params.vtk_out_freq == 0) {
#if defined(GPU)
checkError(cudaMemcpy(&particles[0], d_particles, nBytes, cudaMemcpyDeviceToHost), "memcpy device to host vtk");
#endif
output_writer.write_vtk(particles, params, iter_v);
}
std::cout << total_time << std::endl;
#if defined(GPU)
checkError(cudaFree(d_params), "free");
checkError(cudaFree(d_particles), "free");
checkError(cudaFree(d_linked_cells), "free");
checkError(cudaFree(d_linked_particles), "free");
#endif
exit(EXIT_SUCCESS);
}
|
955adaf13316af8fb61868a4e6836156ebcf2982.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron<32>
template hipError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<32> >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeConvexPolyhedron<32> >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<32> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<32> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 955adaf13316af8fb61868a4e6836156ebcf2982.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeSphere.h"
#include "ShapeConvexPolygon.h"
#include "ShapePolyhedron.h"
#include "ShapeConvexPolyhedron.h"
#include "ShapeSpheropolyhedron.h"
#include "ShapeSpheropolygon.h"
#include "ShapeSimplePolygon.h"
#include "ShapeEllipsoid.h"
#include "ShapeFacetedSphere.h"
#include "ShapeSphinx.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeConvexPolyhedron<32>
template cudaError_t gpu_hpmc_free_volume<ShapeConvexPolyhedron<32> >(const hpmc_free_volume_args_t &args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeConvexPolyhedron<32> >(const hpmc_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeConvexPolyhedron<32> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeConvexPolyhedron<32> >(const hpmc_implicit_args_t& args,
const typename ShapeConvexPolyhedron<32> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
7f579186dbd7b249435746e2b5ad317aad0d4b0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication program for the GPU
This program generates two matrices of defined size and multiplies them
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "helpers.cuh"
//Dimensions of the first matrix
#define ROWS1 16
#define COLS1 16
//Dimensions of the seconds matrix
#define ROWS2 16
#define COLS2 16
/* Function to do matrix multiplication */
__global__ void matMul(int *matC, int *matA, int *matB) {
int row, col, k, prod = 0; // prod accumulates the dot product and must start at zero
row = blockIdx.y * blockDim.y + threadIdx.y;
col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < ROWS1 && col < COLS2) {
for (k = 0; k < COLS1; k++) {
prod = prod + matA[row * COLS1 + k] * matB[k * COLS2 + col];
}
matC[row * COLS2 + col] = prod;
}
}
int main() {
//check whether dimensions are valid for a multiplication
if (COLS1 != ROWS2) {
printf("Matrix dimensions are invalid for matrix multiplication\n");
exit(1);
}
//Initialize arrays in RAM
int matA[ROWS1 * COLS1];
int matB[ROWS2 * COLS2];
int matC[ROWS1 * COLS2];
//generate some values for matrixA
int i, j;
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS1; j++) {
matA[i * COLS1 + j] = i + j;
}
}
//print the matA
printf("Matrix A : \n");
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS1; j++) {
printf("%5d ", matA[i * COLS1 + j]);
}
printf("\n");
}
printf("\n");
//generate values for matrixB
for (i = 0; i < ROWS2; i++) {
for (j = 0; j < COLS2; j++) {
matB[i * COLS2 + j] = i - j;
}
}
//print the matB
printf("Matrix B : \n");
for (i = 0; i < ROWS2; i++) {
for (j = 0; j < COLS2; j++) {
printf("%5d ", matB[i * COLS2 + j]);
}
printf("\n");
}
printf("\n");
/************cuda**********/
int *cudaMatA;
int *cudaMatB;
int *cudaMatC;
hipMalloc((void **) &cudaMatA, sizeof(int) * ROWS1 * COLS1);
checkCudaError();
hipMalloc((void **) &cudaMatB, sizeof(int) * ROWS2 * COLS2);
checkCudaError();
hipMalloc((void **) &cudaMatC, sizeof(int) * ROWS1 * COLS2);
checkCudaError();
hipMemcpy(cudaMatA, matA, sizeof(int) * ROWS1 * COLS1, hipMemcpyHostToDevice);
checkCudaError();
hipMemcpy(cudaMatB, matB, sizeof(int) * ROWS2 * COLS2, hipMemcpyHostToDevice);
checkCudaError();
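// One thread per output element: 16x16 thread blocks, with enough blocks (ceiling division)
// in each dimension to cover the ROWS1 x COLS2 result matrix.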
dim3 blockNum(ceil(COLS2 / (float) 16), ceil(ROWS1 / (float) 16));
dim3 threadsPerBlocks(16, 16);
clock_t start = clock();
matMul << < blockNum, threadsPerBlocks >> > (cudaMatC, cudaMatA, cudaMatB);
checkCudaError();
hipDeviceSynchronize(); // kernel launch is asynchronous; wait for completion so the timing is meaningful
clock_t stop = clock();
hipMemcpy(matC, cudaMatC, sizeof(int) * ROWS1 * COLS2, hipMemcpyDeviceToHost);
checkCudaError();
hipFree(cudaMatA);
checkCudaError();
hipFree(cudaMatB);
checkCudaError();
hipFree(cudaMatC);
checkCudaError();
//print the answer
printf("Answer : \n");
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS2; j++) {
printf("%5d ", matC[i * COLS2 + j]);
}
printf("\n");
}
//calculate the time taken and print to stderr
double elapsedtime = (stop - start) / (double) CLOCKS_PER_SEC;
fprintf(stderr, "Elapsed time for operation on CPU is %1.5f seconds \n", elapsedtime);
return 0;
}
| 7f579186dbd7b249435746e2b5ad317aad0d4b0a.cu | /* Matrix multiplication program for the GPU
This program generates two matrices of defined size and multiplies them
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "helpers.cuh"
//Dimensions of the first matrix
#define ROWS1 16
#define COLS1 16
//Dimensions of the seconds matrix
#define ROWS2 16
#define COLS2 16
/* Function to do matrix multiplication */
__global__ void matMul(int *matC, int *matA, int *matB) {
int row, col, k, prod = 0; // prod accumulates the dot product and must start at zero
row = blockIdx.y * blockDim.y + threadIdx.y;
col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < ROWS1 && col < COLS2) {
for (k = 0; k < COLS1; k++) {
prod = prod + matA[row * COLS1 + k] * matB[k * COLS2 + col];
}
matC[row * COLS2 + col] = prod;
}
}
int main() {
//check whether dimensions are valid for a multiplication
if (COLS1 != ROWS2) {
printf("Matrix dimensions are invalid for matrix multiplication\n");
exit(1);
}
//Initialize arrays in RAM
int matA[ROWS1 * COLS1];
int matB[ROWS2 * COLS2];
int matC[ROWS1 * COLS2];
//generate some values for matrixA
int i, j;
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS1; j++) {
matA[i * COLS1 + j] = i + j;
}
}
//print the matA
printf("Matrix A : \n");
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS1; j++) {
printf("%5d ", matA[i * COLS1 + j]);
}
printf("\n");
}
printf("\n");
//generate values for matrixB
for (i = 0; i < ROWS2; i++) {
for (j = 0; j < COLS2; j++) {
matB[i * COLS2 + j] = i - j;
}
}
//print the matB
printf("Matrix B : \n");
for (i = 0; i < ROWS2; i++) {
for (j = 0; j < COLS2; j++) {
printf("%5d ", matB[i * COLS2 + j]);
}
printf("\n");
}
printf("\n");
/************cuda**********/
int *cudaMatA;
int *cudaMatB;
int *cudaMatC;
cudaMalloc((void **) &cudaMatA, sizeof(int) * ROWS1 * COLS1);
checkCudaError();
cudaMalloc((void **) &cudaMatB, sizeof(int) * ROWS2 * COLS2);
checkCudaError();
cudaMalloc((void **) &cudaMatC, sizeof(int) * ROWS1 * COLS2);
checkCudaError();
cudaMemcpy(cudaMatA, matA, sizeof(int) * ROWS1 * COLS1, cudaMemcpyHostToDevice);
checkCudaError();
cudaMemcpy(cudaMatB, matB, sizeof(int) * ROWS2 * COLS2, cudaMemcpyHostToDevice);
checkCudaError();
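// One thread per output element: 16x16 thread blocks, with enough blocks (ceiling division)
// in each dimension to cover the ROWS1 x COLS2 result matrix.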
dim3 blockNum(ceil(COLS2 / (float) 16), ceil(ROWS1 / (float) 16));
dim3 threadsPerBlocks(16, 16);
clock_t start = clock();
matMul << < blockNum, threadsPerBlocks >> > (cudaMatC, cudaMatA, cudaMatB);
checkCudaError();
cudaDeviceSynchronize(); // kernel launch is asynchronous; wait for completion so the timing is meaningful
clock_t stop = clock();
cudaMemcpy(matC, cudaMatC, sizeof(int) * ROWS1 * COLS2, cudaMemcpyDeviceToHost);
checkCudaError();
cudaFree(cudaMatA);
checkCudaError();
cudaFree(cudaMatB);
checkCudaError();
cudaFree(cudaMatC);
checkCudaError();
//print the answer
printf("Answer : \n");
for (i = 0; i < ROWS1; i++) {
for (j = 0; j < COLS2; j++) {
printf("%5d ", matC[i * COLS2 + j]);
}
printf("\n");
}
//calculate the time taken and print to stderr
double elapsedtime = (stop - start) / (double) CLOCKS_PER_SEC;
fprintf(stderr, "Elapsed time for operation on CPU is %1.5f seconds \n", elapsedtime);
return 0;
}
|
ec1ab09c167a972a49c3081de6b2bd6e59714473.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "AddVector.cuh"
__global__ void addKernelI(float32 *c, float32 *a, float32 *b, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = a[ID] + b[ID];
}
}
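// Despite the name, this kernel does not add: it overwrites every element of c with the constant k.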
__global__ void addKernelC(float32 *c, float32 k, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = k;
}
}
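// Overwrites every element of c with the fixed b2Vec2 value vec (single-block indexing as above).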
__global__ void addKernelV(b2Vec2 *c, b2Vec2 vec, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = vec;
}
}
void addVectorI(float32 *re, float32 *a, float32 *b, int size)
{
float32 *dev_a = 0;
float32 *dev_b = 0;
float32 *dev_c = 0;
hipSetDevice(0);
hipMalloc((void**)&dev_c, size * sizeof(float32));
hipMalloc((void**)&dev_a, size * sizeof(float32));
hipMalloc((void**)&dev_b, size * sizeof(float32));
hipMemcpy(dev_a, a, size * sizeof(float32), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(float32), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addKernelI), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b, size);
hipDeviceSynchronize();
hipMemcpy(re, dev_c, size * sizeof(float32), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
void addVectorC(float32 *c, float32 k, int size)
{
float32 *dev_c = 0;
hipSetDevice(0);
hipMalloc((void**)&dev_c, size * sizeof(float32));
hipMemcpy(dev_c, c, size * sizeof(float32), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addKernelC), dim3(1), dim3(size), 0, 0, dev_c, k, size);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, size * sizeof(float32), hipMemcpyDeviceToHost);
hipFree(dev_c);
}
void addVectorV(b2Vec2 *c, b2Vec2 vec, int size)
{
b2Vec2 *dev_c = 0;
hipSetDevice(0);
hipMalloc((void**)&dev_c, size * sizeof(b2Vec2));
hipMemcpy(dev_c, c, size * sizeof(b2Vec2), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addKernelV), dim3(1), dim3(size), 0, 0, dev_c, vec, size);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, size * sizeof(b2Vec2), hipMemcpyDeviceToHost);
hipFree(dev_c);
} | ec1ab09c167a972a49c3081de6b2bd6e59714473.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "AddVector.cuh"
__global__ void addKernelI(float32 *c, float32 *a, float32 *b, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = a[ID] + b[ID];
}
}
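// Despite the name, this kernel does not add: it overwrites every element of c with the constant k.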
__global__ void addKernelC(float32 *c, float32 k, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = k;
}
}
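// Overwrites every element of c with the fixed b2Vec2 value vec (single-block indexing as above).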
__global__ void addKernelV(b2Vec2 *c, b2Vec2 vec, int size)
{
int ID = threadIdx.x;
if (ID < size) {
c[ID] = vec;
}
}
void addVectorI(float32 *re, float32 *a, float32 *b, int size)
{
float32 *dev_a = 0;
float32 *dev_b = 0;
float32 *dev_c = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dev_c, size * sizeof(float32));
cudaMalloc((void**)&dev_a, size * sizeof(float32));
cudaMalloc((void**)&dev_b, size * sizeof(float32));
cudaMemcpy(dev_a, a, size * sizeof(float32), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(float32), cudaMemcpyHostToDevice);
addKernelI<<<1, size>>>(dev_c, dev_a, dev_b, size);
cudaDeviceSynchronize();
cudaMemcpy(re, dev_c, size * sizeof(float32), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
void addVectorC(float32 *c, float32 k, int size)
{
float32 *dev_c = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dev_c, size * sizeof(float32));
cudaMemcpy(dev_c, c, size * sizeof(float32), cudaMemcpyHostToDevice);
addKernelC<<<1, size>>>(dev_c, k, size);
cudaDeviceSynchronize();
cudaMemcpy(c, dev_c, size * sizeof(float32), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
}
void addVectorV(b2Vec2 *c, b2Vec2 vec, int size)
{
b2Vec2 *dev_c = 0;
cudaSetDevice(0);
cudaMalloc((void**)&dev_c, size * sizeof(b2Vec2));
cudaMemcpy(dev_c, c, size * sizeof(b2Vec2), cudaMemcpyHostToDevice);
addKernelV<<<1, size>>>(dev_c, vec, size);
cudaDeviceSynchronize();
cudaMemcpy(c, dev_c, size * sizeof(b2Vec2), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
} |
6c3b719d371f1aa81cca0d9634ae4f489b64febd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_functions.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf(stderr, "GPU: %s: %d: %s\n", file, line, hipGetErrorString(code));
if (abort) exit(code);
}
}
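// One thread per pixel (grid-stride loop): sphere-trace a camera ray using the fractal's
// distance estimator; hits are shaded green, misses are shaded grey by the number of marching steps.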
__global__
void drawPixels(int numPixels,
Fractal* fractal,
unsigned char* buffer,
int numThreads,
int numBlocks,
Camera cam,
unsigned int millis,
int screen_width,
int screen_height) {
int steps;
bool hit;
double dst;
double rayPos[3];
double rayDelta[3];
double x, y;
for (int pixel = threadIdx.x + blockIdx.x * numThreads; pixel < numPixels; pixel += numThreads * numBlocks) {
rayPos[0] = cam.pos[0];
rayPos[1] = cam.pos[1];
rayPos[2] = cam.pos[2];
x = (double) (pixel % screen_width) / screen_width;
y = (double) (pixel / screen_width) / screen_height;
cam.getDeltaFrom2D(x, y, rayDelta);
// printf("x, y: %f %f\n", x, y);
// printf("Ray delta: %f %f %f\n", rayDelta[0], rayDelta[1], rayDelta[2]);
steps = 0;
while (true) {
// clock_t start_time = clock();
// clock_t stop_time;
dst = static_cast<BasicSphere*>(fractal)->DE(rayPos);
if (dst > DST_MAX) {
hit = false;
break;
}
if (dst < DST_MIN) {
hit = true;
break;
}
rayPos[0] += rayDelta[0] * dst;
rayPos[1] += rayDelta[1] * dst;
rayPos[2] += rayDelta[2] * dst;
steps += 1;
// stop_time = clock();
// printf("Time: %f microseconds\n", (int) (stop_time - start_time) / 1987.0);
}
if (hit) {
buffer[pixel * 4 + 0] = 0x00; // b
buffer[pixel * 4 + 1] = 0xFF; // g
buffer[pixel * 4 + 2] = 0x00; // r
buffer[pixel * 4 + 3] = 0x00; // a
} else {
buffer[pixel * 4 + 0] = (unsigned char) (steps * 4); // b
buffer[pixel * 4 + 1] = (unsigned char) (steps * 4); // g
buffer[pixel * 4 + 2] = (unsigned char) (steps * 4); // r
buffer[pixel * 4 + 3] = 0x00; // a
}
}
}
void renderScreen(int numPixels, Fractal* fractal, Camera* cam, unsigned char* buffer, int screen_width, int screen_height) {
int numThreads = 1024;
int numBlocks = 32;
hipLaunchKernelGGL(( drawPixels), dim3(numBlocks), dim3(numThreads), 0, 0,
numPixels,
fractal,
buffer,
numThreads,
numBlocks,
*cam, // copying data because it needs to know xFov and yFov
SDL_GetTicks(),
screen_width,
screen_height);
hipDeviceSynchronize();
gpuErrchk(hipPeekAtLastError()); // to see stdout before handling
}
| 6c3b719d371f1aa81cca0d9634ae4f489b64febd.cu | #include "cuda_functions.h"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU: %s: %d: %s\n", file, line, cudaGetErrorString(code));
if (abort) exit(code);
}
}
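// One thread per pixel (grid-stride loop): sphere-trace a camera ray using the fractal's
// distance estimator; hits are shaded green, misses are shaded grey by the number of marching steps.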
__global__
void drawPixels(int numPixels,
Fractal* fractal,
unsigned char* buffer,
int numThreads,
int numBlocks,
Camera cam,
unsigned int millis,
int screen_width,
int screen_height) {
int steps;
bool hit;
double dst;
double rayPos[3];
double rayDelta[3];
double x, y;
for (int pixel = threadIdx.x + blockIdx.x * numThreads; pixel < numPixels; pixel += numThreads * numBlocks) {
rayPos[0] = cam.pos[0];
rayPos[1] = cam.pos[1];
rayPos[2] = cam.pos[2];
x = (double) (pixel % screen_width) / screen_width;
y = (double) (pixel / screen_width) / screen_height;
cam.getDeltaFrom2D(x, y, rayDelta);
// printf("x, y: %f %f\n", x, y);
// printf("Ray delta: %f %f %f\n", rayDelta[0], rayDelta[1], rayDelta[2]);
steps = 0;
while (true) {
// clock_t start_time = clock();
// clock_t stop_time;
dst = static_cast<BasicSphere*>(fractal)->DE(rayPos);
if (dst > DST_MAX) {
hit = false;
break;
}
if (dst < DST_MIN) {
hit = true;
break;
}
rayPos[0] += rayDelta[0] * dst;
rayPos[1] += rayDelta[1] * dst;
rayPos[2] += rayDelta[2] * dst;
steps += 1;
// stop_time = clock();
// printf("Time: %f microseconds\n", (int) (stop_time - start_time) / 1987.0);
}
if (hit) {
buffer[pixel * 4 + 0] = 0x00; // b
buffer[pixel * 4 + 1] = 0xFF; // g
buffer[pixel * 4 + 2] = 0x00; // r
buffer[pixel * 4 + 3] = 0x00; // a
} else {
buffer[pixel * 4 + 0] = (unsigned char) (steps * 4); // b
buffer[pixel * 4 + 1] = (unsigned char) (steps * 4); // g
buffer[pixel * 4 + 2] = (unsigned char) (steps * 4); // r
buffer[pixel * 4 + 3] = 0x00; // a
}
}
}
void renderScreen(int numPixels, Fractal* fractal, Camera* cam, unsigned char* buffer, int screen_width, int screen_height) {
int numThreads = 1024;
int numBlocks = 32;
drawPixels<<<numBlocks, numThreads>>>(
numPixels,
fractal,
buffer,
numThreads,
numBlocks,
*cam, // copying data because it needs to know xFov and yFov
SDL_GetTicks(),
screen_width,
screen_height);
cudaDeviceSynchronize();
gpuErrchk(cudaPeekAtLastError()); // to see stdout before handling
}
|
84cf2350a0f982d7107bbdf31f22b0fd9f23ae4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixModuleCreateAbort.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
RadiancePRD* prd
)
{
// TODO: deduce stride from num ray-types passed in params
unsigned int u0, u1;
packPointer( prd, u0, u1 );
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1 );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const uint3 idx = optixGetLaunchIndex();
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
float3 result = make_float3( 0.0f );
int i = params.samples_per_launch;
do
{
const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
const float2 d = 2.0f * make_float2(
( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
int depth = 0;
for( ;; )
{
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&prd );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
if( prd.done || depth >= 3 ) // TODO RR, variable for depth
break;
ray_origin = prd.origin;
ray_direction = prd.direction;
++depth;
}
}
while( --i );
const uint3 launch_index = optixGetLaunchIndex();
const unsigned int image_index = launch_index.y * params.width + launch_index.x;
float3 accum_color = result / static_cast<float>( params.samples_per_launch );
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__radiance()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
RadiancePRD* prd = getPRD();
prd->radiance = make_float3( rt_data->bg_color );
prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
| 84cf2350a0f982d7107bbdf31f22b0fd9f23ae4b.cu | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <optix.h>
#include "optixModuleCreateAbort.h"
#include "random.h"
extern "C" {
__constant__ Params params;
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
static __forceinline__ __device__ void setPayloadOcclusion( bool occluded )
{
optixSetPayload_0( static_cast<unsigned int>( occluded ) );
}
static __forceinline__ __device__ void traceRadiance(
OptixTraversableHandle handle,
float3 ray_origin,
float3 ray_direction,
float tmin,
float tmax,
RadiancePRD* prd
)
{
// TODO: deduce stride from num ray-types passed in params
unsigned int u0, u1;
packPointer( prd, u0, u1 );
optixTrace(
handle,
ray_origin,
ray_direction,
tmin,
tmax,
0.0f, // rayTime
OptixVisibilityMask( 1 ),
OPTIX_RAY_FLAG_NONE,
RAY_TYPE_RADIANCE, // SBT offset
RAY_TYPE_COUNT, // SBT stride
RAY_TYPE_RADIANCE, // missSBTIndex
u0, u1 );
}
//------------------------------------------------------------------------------
//
//
//
//------------------------------------------------------------------------------
extern "C" __global__ void __raygen__rg()
{
const int w = params.width;
const int h = params.height;
const float3 eye = params.eye;
const float3 U = params.U;
const float3 V = params.V;
const float3 W = params.W;
const uint3 idx = optixGetLaunchIndex();
const int subframe_index = params.subframe_index;
unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index );
float3 result = make_float3( 0.0f );
int i = params.samples_per_launch;
do
{
const float2 subpixel_jitter = make_float2( rnd( seed )-0.5f, rnd( seed )-0.5f );
const float2 d = 2.0f * make_float2(
( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ),
( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h )
) - 1.0f;
float3 ray_direction = normalize(d.x*U + d.y*V + W);
float3 ray_origin = eye;
RadiancePRD prd;
prd.emitted = make_float3(0.f);
prd.radiance = make_float3(0.f);
prd.attenuation = make_float3(1.f);
prd.countEmitted = true;
prd.done = false;
prd.seed = seed;
int depth = 0;
for( ;; )
{
traceRadiance(
params.handle,
ray_origin,
ray_direction,
0.01f, // tmin // TODO: smarter offset
1e16f, // tmax
&prd );
result += prd.emitted;
result += prd.radiance * prd.attenuation;
if( prd.done || depth >= 3 ) // TODO RR, variable for depth
break;
ray_origin = prd.origin;
ray_direction = prd.direction;
++depth;
}
}
while( --i );
const uint3 launch_index = optixGetLaunchIndex();
const unsigned int image_index = launch_index.y * params.width + launch_index.x;
float3 accum_color = result / static_cast<float>( params.samples_per_launch );
if( subframe_index > 0 )
{
const float a = 1.0f / static_cast<float>( subframe_index+1 );
const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]);
accum_color = lerp( accum_color_prev, accum_color, a );
}
params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f);
params.frame_buffer[ image_index ] = make_color ( accum_color );
}
extern "C" __global__ void __miss__radiance()
{
MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() );
RadiancePRD* prd = getPRD();
prd->radiance = make_float3( rt_data->bg_color );
prd->done = true;
}
extern "C" __global__ void __closesthit__occlusion()
{
setPayloadOcclusion( true );
}
|
0a6cd2e406b4d9866e5364b7b5b0165e8f778672.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdio>
#include <cassert>
#include "PLOG.hh"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wlanguage-extension-token"
#endif
#include "cuRANDWrapper_kernel.hh"
#include "LaunchSequence.hh"
#include "hiprand/hiprand_kernel.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#define CUDA_SAFE_CALL( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
/**
allocate_rng_wrapper
---------------------
Allocates hiprandState_t device buffer sized to hold the number
of items from LaunchSequence (aka rngmax) and returns pointer to it.
**/
hipDeviceptr_t allocate_rng_wrapper( const LaunchSequence* launchseq)
{
unsigned int items = launchseq->getItems();
size_t nbytes = items*sizeof(hiprandState_t) ;
int value = 0 ;
int M = 1000000 ;
LOG(info)
<< " items " << items
<< " items/M " << items/M
<< " sizeof(hiprandState_t) " << sizeof(hiprandState_t)
<< " nbytes " << nbytes
<< " nbytes/M " << nbytes/M
;
hipDeviceptr_t dev_rng_states ;
CUDA_SAFE_CALL( hipMalloc((void**)&dev_rng_states, nbytes ));
CUDA_SAFE_CALL( hipMemset((void*)dev_rng_states, value, nbytes ));
return dev_rng_states ;
}
/**
free_rng_wrapper
-------------------
Frees device buffer provided as argument.
**/
void free_rng_wrapper( hipDeviceptr_t dev_rng_states )
{
CUDA_SAFE_CALL( hipFree((void*)dev_rng_states));
}
/**
copytohost_rng_wrapper
-----------------------
1. allocates host memory for item count from the launchseq
2. hipMemcpy from device buffer pointer provided in argument to the host buffer
3. returns pointer to host buffer
**/
hiprandState_t* copytohost_rng_wrapper( const LaunchSequence* launchseq, hipDeviceptr_t dev_rng_states)
{
unsigned items = launchseq->getItems();
void* host_rng_states = malloc(sizeof(hiprandState_t)*items);
CUDA_SAFE_CALL( hipMemcpy(host_rng_states, (void*)dev_rng_states, sizeof(hiprandState_t)*items, hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipDeviceSynchronize() );
return (hiprandState_t*)host_rng_states ;
}
/**
copytodevice_rng_wrapper
--------------------------
1. allocates device buffer sized for launchseq items
2. hipMemcpy from host buffer provided in argument
3. returns device pointer
**/
hipDeviceptr_t copytodevice_rng_wrapper( const LaunchSequence* launchseq, void* host_rng_states)
{
unsigned int items = launchseq->getItems();
hipDeviceptr_t dev_rng_states;
CUDA_SAFE_CALL( hipMalloc((void**)&dev_rng_states, items*sizeof(hiprandState_t)));
CUDA_SAFE_CALL( hipMemcpy((void*)dev_rng_states, host_rng_states, sizeof(hiprandState_t)*items, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipDeviceSynchronize() );
return dev_rng_states ;
}
/**
before_kernel
--------------
* create start and stop events and record the start
**/
void before_kernel( hipEvent_t& start, hipEvent_t& stop )
{
CUDA_SAFE_CALL( hipEventCreate( &start ) );
CUDA_SAFE_CALL( hipEventCreate( &stop ) );
CUDA_SAFE_CALL( hipEventRecord( start,0 ) );
}
/**
after_kernel
--------------
* record the stop, returning elapsed time
**/
float after_kernel( hipEvent_t& start, hipEvent_t& stop )
{
float kernel_time = 0.f ;
CUDA_SAFE_CALL( hipEventRecord( stop,0 ) );
CUDA_SAFE_CALL( hipEventSynchronize(stop) );
CUDA_SAFE_CALL( hipEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( hipEventDestroy( start ) );
CUDA_SAFE_CALL( hipEventDestroy( stop ) );
CUDA_SAFE_CALL( hipDeviceSynchronize() );
return kernel_time ;
}
void devicesync_wrapper()
{
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
/**
init_rng
----------
Invokes hiprand_init with resulting hiprandState_t written into rng_states
of the argument.
The thread_offset is a technicality from doing multiple
launches to complete the initialization.
As rng_state is already offset in the kernel call, NOT doing
the below as it's cleaner for id to be local to the launch::
&rng_states[id+thread_offset]
Chroma approach was to recycle rng_states for each kernel launch
in the cohort being propagated, which means the size of each kernel
launch is limited by timeouts occurring in any of the kernel launches
including the hiprand_init one
Instead of doing this try having a state for every photon
and offsetting into it : the advantage is that changes to
the CUDA launch configuration should not have any impact
on the random number streams being consumed by the simulation (?)
But all these rng streams are rather expensive though, so we
should compare performance with the chroma piecewise approach.
Maybe we are just paying the expense at initialization ?
(On macOS) hiprand_init runs 10x slower for large thread_offset ?
starting from 262144 running the kernel launch sequence in reverse
confirms this finding
* :google:`hiprand_init slow with large sequence numbers`
From cuda-hiprand CURAND_Library.pdf Chapter 3::
__device__ void hiprand_init (
unsigned long long seed,
unsigned long long sequence,
unsigned long long offset,
hiprandState_t *state )
The hiprand_init() function sets up an initial state allocated by the caller using the
given seed, sequence number, and offset within the sequence. Different seeds are
guaranteed to produce different starting states and different sequences.
...
Sequences generated with different seeds usually do not have statistically correlated
values, but some choices of seeds may give statistically correlated sequences. Sequences
generated with the same seed and different sequence numbers will not have statistically
correlated values.
For the highest quality parallel pseudorandom number generation, each experiment
should be assigned a unique seed. Within an experiment, each thread of computation
should be assigned a unique sequence number. If an experiment spans multiple kernel
launches, it is recommended that threads between kernel launches be given the same
seed, and sequence numbers be assigned in a monotonically increasing way. If the same
configuration of threads is launched, random state can be preserved in global memory
between launches to avoid state setup time.
Opticks Approach
~~~~~~~~~~~~~~~~~~~~
Photon record_id used as the hiprand sequence number
with seed and offset set as zero in cuRANDWrapperTest.
**/
__global__ void init_rng(int threads_per_launch, int thread_offset, hiprandState_t* rng_states, unsigned long long seed, unsigned long long offset)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= threads_per_launch) return;
hiprand_init(seed, id + thread_offset , offset, &rng_states[id]);
}
/**
init_rng_wrapper
-----------------
Typically multiple launches are made in order to initialize the hiprandState_t buffer
for rngmax items.
Loops over launchseq NumLaunches invoking init_rng
which writes curandStates into offset device buffer locations.
**/
void init_rng_wrapper( const LaunchSequence* launchseq, hipDeviceptr_t dev_rng_states, unsigned long long seed, unsigned long long offset)
{
hipEvent_t start, stop ;
for(unsigned i=0 ; i < launchseq->getNumLaunches() ; i++ )
{
const Launch& launch = launchseq->getLaunch(i) ;
hiprandState_t* dev_rng_states_launch = (hiprandState_t*)dev_rng_states + launch.thread_offset ;
before_kernel( start, stop );
hipLaunchKernelGGL(( init_rng), dim3(launch.blocks_per_launch), dim3(launch.threads_per_block), 0, 0, launch.threads_per_launch, launch.thread_offset, dev_rng_states_launch, seed, offset );
float kernel_time = after_kernel( start, stop );
const_cast<Launch&>(launch).kernel_time = kernel_time ;
launch.Summary("init_rng_wrapper");
}
launchseq->Summary("init_rng_wrapper");
}
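/**
Typical wrapper usage (sketch)
-------------------------------
A minimal sketch of how these wrappers are expected to compose; the
LaunchSequence construction below is an assumption for illustration,
not taken from this file::

    LaunchSequence seq(items, max_blocks, threads_per_block);   // hypothetical ctor
    hipDeviceptr_t d_states = allocate_rng_wrapper(&seq);
    init_rng_wrapper(&seq, d_states, seed, offset);
    test_rng_wrapper(&seq, d_states, host_a, true);
    hiprandState_t* h_states = copytohost_rng_wrapper(&seq, d_states);
    free_rng_wrapper(d_states);
**/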
/**
test_rng
-----------
Use the rng_states hiprandState_t of the argument, offset by id,
to generate some random floats.
NB no id offsetting on rng_states or a, as the offsetting
was done once in the kernel call;
this means the thread_offset argument is not used
hiprandState_t struct contains double boxmuller_extra_double
that causes demoting to float warnings in the below.
Stanley Seibert judges it to be benign.
* http://lists.tiker.net/pipermail/pycuda/2011-December/003513.html
**/
__global__ void test_rng(int threads_per_launch, int thread_offset, hiprandState_t* rng_states, float *a, bool update_states )
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= threads_per_launch) return;
hiprandState_t rng = rng_states[id]; // copy from global to register
a[id] = hiprand_uniform(&rng);
if(update_states) rng_states[id] = rng; // update from register to global
}
/**
test_rng_wrapper
----------------
1. allocate device buffer dev_a with room for one float per item
2. loop over launchseq launches invoking test_rng launches
using the curandStates provided in the argument
3. for each thread of the launch a single random float is
generated which is stored in dev_a
4. copy dev_a to host_a of argument and free on device
**/
void test_rng_wrapper(
const LaunchSequence* launchseq,
hipDeviceptr_t dev_rng_states,
float* host_a,
bool update_states
)
{
hipEvent_t start, stop ;
unsigned int items = launchseq->getItems();
float* dev_a;
CUDA_SAFE_CALL(hipMalloc((void**)&dev_a, items*sizeof(float)));
for(unsigned i=0 ; i < launchseq->getNumLaunches() ; i++ )
{
const Launch& launch = launchseq->getLaunch(i) ;
hiprandState_t* dev_rng_states_launch = (hiprandState_t*)dev_rng_states + launch.thread_offset ;
float* dev_a_launch = dev_a + launch.thread_offset ;
before_kernel( start, stop );
hipLaunchKernelGGL(( test_rng), dim3(launch.blocks_per_launch), dim3(launch.threads_per_block), 0, 0, launch.threads_per_launch, launch.thread_offset, dev_rng_states_launch, dev_a_launch, update_states );
float kernel_time = after_kernel( start, stop );
const_cast<Launch&>(launch).kernel_time = kernel_time ;
}
CUDA_SAFE_CALL( hipMemcpy(host_a, dev_a, items*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_SAFE_CALL( hipFree(dev_a) );
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
| 0a6cd2e406b4d9866e5364b7b5b0165e8f778672.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdio>
#include <cassert>
#include "PLOG.hh"
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wlanguage-extension-token"
#endif
#include "cuRANDWrapper_kernel.hh"
#include "LaunchSequence.hh"
#include "curand_kernel.h"
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#define CUDA_SAFE_CALL( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
/**
allocate_rng_wrapper
---------------------
Allocates curandState device buffer sized to hold the number
of items from LaunchSequence (aka rngmax) and returns pointer to it.
**/
CUdeviceptr allocate_rng_wrapper( const LaunchSequence* launchseq)
{
unsigned int items = launchseq->getItems();
size_t nbytes = items*sizeof(curandState) ;
int value = 0 ;
int M = 1000000 ;
LOG(info)
<< " items " << items
<< " items/M " << items/M
<< " sizeof(curandState) " << sizeof(curandState)
<< " nbytes " << nbytes
<< " nbytes/M " << nbytes/M
;
CUdeviceptr dev_rng_states ;
CUDA_SAFE_CALL( cudaMalloc((void**)&dev_rng_states, nbytes ));
CUDA_SAFE_CALL( cudaMemset((void*)dev_rng_states, value, nbytes ));
return dev_rng_states ;
}
/**
free_rng_wrapper
-------------------
Frees device buffer provided as argument.
**/
void free_rng_wrapper( CUdeviceptr dev_rng_states )
{
CUDA_SAFE_CALL( cudaFree((void*)dev_rng_states));
}
/**
copytohost_rng_wrapper
-----------------------
1. allocates host memory for item count from the launchseq
2. cudaMemcpy from device buffer pointer provided in argument to the host buffer
3. returns pointer to host buffer
**/
curandState* copytohost_rng_wrapper( const LaunchSequence* launchseq, CUdeviceptr dev_rng_states)
{
unsigned items = launchseq->getItems();
void* host_rng_states = malloc(sizeof(curandState)*items);
CUDA_SAFE_CALL( cudaMemcpy(host_rng_states, (void*)dev_rng_states, sizeof(curandState)*items, cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
return (curandState*)host_rng_states ;
}
/**
copytodevice_rng_wrapper
--------------------------
1. allocates device buffer sized for launchseq items
2. cudaMemcpy from host buffer provided in argument
3. returns device pointer
**/
CUdeviceptr copytodevice_rng_wrapper( const LaunchSequence* launchseq, void* host_rng_states)
{
unsigned int items = launchseq->getItems();
CUdeviceptr dev_rng_states;
CUDA_SAFE_CALL( cudaMalloc((void**)&dev_rng_states, items*sizeof(curandState)));
CUDA_SAFE_CALL( cudaMemcpy((void*)dev_rng_states, host_rng_states, sizeof(curandState)*items, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
return dev_rng_states ;
}
/**
before_kernel
--------------
* create start and stop events and record the start
**/
void before_kernel( cudaEvent_t& start, cudaEvent_t& stop )
{
CUDA_SAFE_CALL( cudaEventCreate( &start ) );
CUDA_SAFE_CALL( cudaEventCreate( &stop ) );
CUDA_SAFE_CALL( cudaEventRecord( start,0 ) );
}
/**
after_kernel
--------------
* record the stop, returning elapsed time
**/
float after_kernel( cudaEvent_t& start, cudaEvent_t& stop )
{
float kernel_time = 0.f ;
CUDA_SAFE_CALL( cudaEventRecord( stop,0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize(stop) );
CUDA_SAFE_CALL( cudaEventElapsedTime(&kernel_time, start, stop) );
CUDA_SAFE_CALL( cudaEventDestroy( start ) );
CUDA_SAFE_CALL( cudaEventDestroy( stop ) );
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
return kernel_time ;
}
void devicesync_wrapper()
{
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
}
/**
init_rng
----------
Invokes curand_init with resulting curandState written into rng_states
of the argument.
The thread_offset is a technicality from doing multiple
launches to complete the initialization.
As rng_state is already offset in the kernel call, NOT doing
the below as it's cleaner for id to be local to the launch::
&rng_states[id+thread_offset]
Chroma approach was to recycle rng_states for each kernel launch
in the cohort being propagated, which means the size of each kernel
launch is limited by timeouts occurring in any of the kernel launches
including the curand_init one
Instead of doing this try having a state for every photon
and offsetting into it : the advantage is that changes to
the CUDA launch configuration should not have any impact
on the random number streams being consumed by the simulation (?)
But all these rng streams are rather expensive though, so we
should compare performance with the chroma piecewise approach.
Maybe we are just paying the expense at initialization ?
(On macOS) curand_init runs 10x slower for large thread_offset ?
starting from 262144 running the kernel launch sequence in reverse
confirms this finding
* :google:`curand_init slow with large sequence numbers`
From cuda-curand CURAND_Library.pdf Chapter 3::
__device__ void curand_init (
unsigned long long seed,
unsigned long long sequence,
unsigned long long offset,
curandState_t *state )
The curand_init() function sets up an initial state allocated by the caller using the
given seed, sequence number, and offset within the sequence. Different seeds are
guaranteed to produce different starting states and different sequences.
...
Sequences generated with different seeds usually do not have statistically correlated
values, but some choices of seeds may give statistically correlated sequences. Sequences
generated with the same seed and different sequence numbers will not have statistically
correlated values.
For the highest quality parallel pseudorandom number generation, each experiment
should be assigned a unique seed. Within an experiment, each thread of computation
should be assigned a unique sequence number. If an experiment spans multiple kernel
launches, it is recommended that threads between kernel launches be given the same
seed, and sequence numbers be assigned in a monotonically increasing way. If the same
configuration of threads is launched, random state can be preserved in global memory
between launches to avoid state setup time.
Opticks Approach
~~~~~~~~~~~~~~~~~~~~
Photon record_id used as the curand sequence number
with seed and offset set as zero in cuRANDWrapperTest.
**/
__global__ void init_rng(int threads_per_launch, int thread_offset, curandState* rng_states, unsigned long long seed, unsigned long long offset)
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= threads_per_launch) return;
curand_init(seed, id + thread_offset , offset, &rng_states[id]);
}
/**
init_rng_wrapper
-----------------
Typically multiple launches are made in order to initialize the curandState buffer
for rngmax items.
Loops over launchseq NumLaunches invoking init_rng
which writes curandStates into offset device buffer locations.
**/
void init_rng_wrapper( const LaunchSequence* launchseq, CUdeviceptr dev_rng_states, unsigned long long seed, unsigned long long offset)
{
cudaEvent_t start, stop ;
for(unsigned i=0 ; i < launchseq->getNumLaunches() ; i++ )
{
const Launch& launch = launchseq->getLaunch(i) ;
curandState* dev_rng_states_launch = (curandState*)dev_rng_states + launch.thread_offset ;
before_kernel( start, stop );
init_rng<<<launch.blocks_per_launch, launch.threads_per_block>>>( launch.threads_per_launch, launch.thread_offset, dev_rng_states_launch, seed, offset );
float kernel_time = after_kernel( start, stop );
const_cast<Launch&>(launch).kernel_time = kernel_time ;
launch.Summary("init_rng_wrapper");
}
launchseq->Summary("init_rng_wrapper");
}
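/**
Typical wrapper usage (sketch)
-------------------------------
A minimal sketch of how these wrappers are expected to compose; the
LaunchSequence construction below is an assumption for illustration,
not taken from this file::

    LaunchSequence seq(items, max_blocks, threads_per_block);   // hypothetical ctor
    CUdeviceptr d_states = allocate_rng_wrapper(&seq);
    init_rng_wrapper(&seq, d_states, seed, offset);
    test_rng_wrapper(&seq, d_states, host_a, true);
    curandState* h_states = copytohost_rng_wrapper(&seq, d_states);
    free_rng_wrapper(d_states);
**/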
/**
test_rng
-----------
Use the rng_states curandState of the argument, offset by id,
to generate some random floats.
NB no id offsetting on rng_states or a, as the offsetting
was done once in the kernel call;
this means the thread_offset argument is not used
curandState struct contains double boxmuller_extra_double
that causes demoting to float warnings in the below.
Stanley Seibert judges it to be benign.
* http://lists.tiker.net/pipermail/pycuda/2011-December/003513.html
**/
__global__ void test_rng(int threads_per_launch, int thread_offset, curandState* rng_states, float *a, bool update_states )
{
int id = blockIdx.x*blockDim.x + threadIdx.x;
if (id >= threads_per_launch) return;
curandState rng = rng_states[id]; // copy from global to register
a[id] = curand_uniform(&rng);
if(update_states) rng_states[id] = rng; // update from register to global
}
/**
test_rng_wrapper
----------------
1. allocate device buffer dev_a with room for one float per item
2. loop over launchseq launches invoking test_rng launches
using the curandStates provided in the argument
3. for each thread of the launch a single random float is
generated which is stored in dev_a
4. copy dev_a to host_a of argument and free on device
**/
void test_rng_wrapper(
const LaunchSequence* launchseq,
CUdeviceptr dev_rng_states,
float* host_a,
bool update_states
)
{
cudaEvent_t start, stop ;
unsigned int items = launchseq->getItems();
float* dev_a;
CUDA_SAFE_CALL(cudaMalloc((void**)&dev_a, items*sizeof(float)));
for(unsigned i=0 ; i < launchseq->getNumLaunches() ; i++ )
{
const Launch& launch = launchseq->getLaunch(i) ;
curandState* dev_rng_states_launch = (curandState*)dev_rng_states + launch.thread_offset ;
float* dev_a_launch = dev_a + launch.thread_offset ;
before_kernel( start, stop );
test_rng<<<launch.blocks_per_launch, launch.threads_per_block>>>( launch.threads_per_launch, launch.thread_offset, dev_rng_states_launch, dev_a_launch, update_states );
float kernel_time = after_kernel( start, stop );
const_cast<Launch&>(launch).kernel_time = kernel_time ;
}
CUDA_SAFE_CALL( cudaMemcpy(host_a, dev_a, items*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_SAFE_CALL( cudaFree(dev_a) );
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
}
|
3fb6ac0c47325c32ed0ba340b78ce75a912fe73f.hip | // !!! This is a file automatically generated by hipify!!!
/*
*/
#include <iostream>
#include <hip/hip_runtime.h>
#include <vector>
#include <hip/hip_runtime.h>
#include "../include/mycudaheader.h"
using namespace std;
int main()
{
size_t num_rows = 18;
size_t num_cols = 18;
size_t max_row_size;
size_t N = 2;
vector<size_t> bc_index = {0, 1, 6, 7, 12, 13};
vector<double> globalmatrix = {
6652102.4, 2400134.4, -4066334.72, -185606.4, 0, 0, 740236.8, 185606.4, -3325952, -2400153.6, 0, 0, 0, 0, 0, 0, 0, 0,
2400134.4, 6652102.4, 185606.4, 740236.8, 0, 0, -185606.4, -4066334.72, -2400153.6, -3325952, 0, 0, 0, 0, 0, 0, 0, 0,
-4066334.72, 185606.4, 13304204.8, 0, -4066334.72, -185606.4, -3325952, 2400153.6, 1480473.6, 0, -3325952, -2400153.6, 0, 0, 0, 0, 0, 0,
-185606.4, 740236.8, 0, 13304204.8, 185606.4, 740236.8, 2400153.6, -3325952, 0, -8132669.44, -2400153.6, -3325952, 0, 0, 0, 0, 0, 0,
0, 0, -4066334.72, 185606.4, 6652102.4, -2400134.4, 0, 0, -3325952, 2400153.6, 740236.8, -185606.4, 0, 0, 0, 0, 0, 0,
0, 0, -185606.4, 740236.8, -2400134.4, 6652102.4, 0, 0, 2400153.6, -3325952, 185606.4, -4066334.72, 0, 0, 0, 0, 0, 0,
740236.8, -185606.4, -3325952, 2400153.6, 0, 0, 13304204.8, 0, -8132669.44, 0, 0, 0, 740236.8, 185606.4, -3325952, -2400153.6, 0, 0,
185606.4, -4066334.72, 2400153.6, -3325952, 0, 0, 0, 13304204.8, 0, 1480473.6, 0, 0, -185606.4, -4066334.72, -2400153.6, -3325952, 0, 0,
-3325952, -2400153.6, 1480473.6, 0, -3325952, 2400153.6, -8132669.44, 0, 26608409.6, 0, -8132669.44, 0, -3325952, 2400153.6, 1480473.6, 0, -3325952, -2400153.6,
-2400153.6, -3325952, 0, -8132669.44, 2400153.6, -3325952, 0, 1480473.6, 0, 26608409.6, 0, 1480473.6, 2400153.6, -3325952, 0, -8132669.44, -2400153.6, -3325952,
0, 0, -3325952, -2400153.6, 740236.8, 185606.4, 0, 0, -8132669.44, 0, 13304204.8, 0, 0, 0, -3325952, 2400153.6, 740236.8, -185606.4,
0, 0, -2400153.6, -3325952, -185606.4, -4066334.72, 0, 0, 0, 1480473.6, 0, 13304204.8, 0, 0, 2400153.6, -3325952, 185606.4, -4066334.72,
0, 0, 0, 0, 0, 0, 740236.8, -185606.4, -3325952, 2400153.6, 0, 0, 6652102.4, -2400134.4, -4066334.72, 185606.4, 0, 0,
0, 0, 0, 0, 0, 0, 185606.4, -4066334.72, 2400153.6, -3325952, 0, 0, -2400134.4, 6652102.4, -185606.4, 740236.8, 0, 0,
0, 0, 0, 0, 0, 0, -3325952, -2400153.6, 1480473.6, 0, -3325952, 2400153.6, -4066334.72, -185606.4, 13304204.8, 0, -4066334.72, 185606.4,
0, 0, 0, 0, 0, 0, -2400153.6, -3325952, 0, -8132669.44, 2400153.6, -3325952, 185606.4, 740236.8, 0, 13304204.8, -185606.4, 740236.8,
0, 0, 0, 0, 0, 0, 0, 0, -3325952, -2400153.6, 740236.8, 185606.4, 0, 0, -4066334.72, -185606.4, 6652102.4, 2400134.4,
0, 0, 0, 0, 0, 0, 0, 0, -2400153.6, -3325952, -185606.4, -4066334.72, 0, 0, 185606.4, 740236.8, 2400134.4, 6652102.4
};
for ( int i = 0 ; i < bc_index.size() ; ++i )
applyMatrixBC(&globalmatrix[0], bc_index[i], num_rows, num_cols);
// get max row size
max_row_size = getMaxRowSize(globalmatrix, num_rows, num_cols);
std::vector<double> value(max_row_size*num_rows, 0.0);
std::vector<std::size_t> index(max_row_size*num_rows, 0);
transformToELL(globalmatrix, value, index, max_row_size, num_rows);
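// Note: the exact ELL layout produced by transformToELL (in mycudaheader.h) is not
// shown here; one common convention stores the matrix column-major by slot, i.e.
// value[c*num_rows + row] and index[c*num_rows + row] for slot c < max_row_size,
// padding short rows with zeros. This layout is an assumption for illustration only.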
hipDeviceSynchronize();
} | 3fb6ac0c47325c32ed0ba340b78ce75a912fe73f.cu | /*
*/
#include <iostream>
#include <cuda.h>
#include <vector>
#include <cuda_runtime.h>
#include "../include/mycudaheader.h"
using namespace std;
int main()
{
size_t num_rows = 18;
size_t num_cols = 18;
size_t max_row_size;
size_t N = 2;
vector<size_t> bc_index = {0, 1, 6, 7, 12, 13};
vector<double> globalmatrix = {
6652102.4, 2400134.4, -4066334.72, -185606.4, 0, 0, 740236.8, 185606.4, -3325952, -2400153.6, 0, 0, 0, 0, 0, 0, 0, 0,
2400134.4, 6652102.4, 185606.4, 740236.8, 0, 0, -185606.4, -4066334.72, -2400153.6, -3325952, 0, 0, 0, 0, 0, 0, 0, 0,
-4066334.72, 185606.4, 13304204.8, 0, -4066334.72, -185606.4, -3325952, 2400153.6, 1480473.6, 0, -3325952, -2400153.6, 0, 0, 0, 0, 0, 0,
-185606.4, 740236.8, 0, 13304204.8, 185606.4, 740236.8, 2400153.6, -3325952, 0, -8132669.44, -2400153.6, -3325952, 0, 0, 0, 0, 0, 0,
0, 0, -4066334.72, 185606.4, 6652102.4, -2400134.4, 0, 0, -3325952, 2400153.6, 740236.8, -185606.4, 0, 0, 0, 0, 0, 0,
0, 0, -185606.4, 740236.8, -2400134.4, 6652102.4, 0, 0, 2400153.6, -3325952, 185606.4, -4066334.72, 0, 0, 0, 0, 0, 0,
740236.8, -185606.4, -3325952, 2400153.6, 0, 0, 13304204.8, 0, -8132669.44, 0, 0, 0, 740236.8, 185606.4, -3325952, -2400153.6, 0, 0,
185606.4, -4066334.72, 2400153.6, -3325952, 0, 0, 0, 13304204.8, 0, 1480473.6, 0, 0, -185606.4, -4066334.72, -2400153.6, -3325952, 0, 0,
-3325952, -2400153.6, 1480473.6, 0, -3325952, 2400153.6, -8132669.44, 0, 26608409.6, 0, -8132669.44, 0, -3325952, 2400153.6, 1480473.6, 0, -3325952, -2400153.6,
-2400153.6, -3325952, 0, -8132669.44, 2400153.6, -3325952, 0, 1480473.6, 0, 26608409.6, 0, 1480473.6, 2400153.6, -3325952, 0, -8132669.44, -2400153.6, -3325952,
0, 0, -3325952, -2400153.6, 740236.8, 185606.4, 0, 0, -8132669.44, 0, 13304204.8, 0, 0, 0, -3325952, 2400153.6, 740236.8, -185606.4,
0, 0, -2400153.6, -3325952, -185606.4, -4066334.72, 0, 0, 0, 1480473.6, 0, 13304204.8, 0, 0, 2400153.6, -3325952, 185606.4, -4066334.72,
0, 0, 0, 0, 0, 0, 740236.8, -185606.4, -3325952, 2400153.6, 0, 0, 6652102.4, -2400134.4, -4066334.72, 185606.4, 0, 0,
0, 0, 0, 0, 0, 0, 185606.4, -4066334.72, 2400153.6, -3325952, 0, 0, -2400134.4, 6652102.4, -185606.4, 740236.8, 0, 0,
0, 0, 0, 0, 0, 0, -3325952, -2400153.6, 1480473.6, 0, -3325952, 2400153.6, -4066334.72, -185606.4, 13304204.8, 0, -4066334.72, 185606.4,
0, 0, 0, 0, 0, 0, -2400153.6, -3325952, 0, -8132669.44, 2400153.6, -3325952, 185606.4, 740236.8, 0, 13304204.8, -185606.4, 740236.8,
0, 0, 0, 0, 0, 0, 0, 0, -3325952, -2400153.6, 740236.8, 185606.4, 0, 0, -4066334.72, -185606.4, 6652102.4, 2400134.4,
0, 0, 0, 0, 0, 0, 0, 0, -2400153.6, -3325952, -185606.4, -4066334.72, 0, 0, 185606.4, 740236.8, 2400134.4, 6652102.4
};
for ( int i = 0 ; i < bc_index.size() ; ++i )
applyMatrixBC(&globalmatrix[0], bc_index[i], num_rows, num_cols);
// get max row size
max_row_size = getMaxRowSize(globalmatrix, num_rows, num_cols);
std::vector<double> value(max_row_size*num_rows, 0.0);
std::vector<std::size_t> index(max_row_size*num_rows, 0);
transformToELL(globalmatrix, value, index, max_row_size, num_rows);
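// Note: the exact ELL layout produced by transformToELL (in mycudaheader.h) is not
// shown here; one common convention stores the matrix column-major by slot, i.e.
// value[c*num_rows + row] and index[c*num_rows + row] for slot c < max_row_size,
// padding short rows with zeros. This layout is an assumption for illustration only.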
cudaDeviceSynchronize();
} |
66eef0b0f94ed7b1069a71bad1cff149e2ef070f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zmcsrcompressor_gpu.cu, normal z -> c, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
__global__ void
magma_cmcsrgpu_kernel1( int num_rows,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex zero = MAGMA_C_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
// this is not a correct rowpointer! this is the nnz in this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_cmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr )
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
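// a single thread rebuilds the CSR row pointer as a serial prefix sum
// over the per-row nonzero counts stored in B_rowptr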
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_cmcsrgpu_kernel3( int num_rows,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
magmaFloatComplex zero = MAGMA_C_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param[in,out]
A magma_c_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cmcsrcompressor_gpu(
magma_c_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_c_matrix B={Magma_CSR}, B2={Magma_CSR};
magma_c_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
magma_index_t *cputmp = NULL;
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue );
dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
// copying the nonzeros into B and write in B.drow how many there are
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() ,
A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue->cuda_stream() ,
A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
CHECK( magma_cmalloc( &B.dval, A->nnz ));
CHECK( magma_index_malloc( &B.dcol, A->nnz ));
// copy correct values back
hipLaunchKernelGGL(( magma_cmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue->cuda_stream() ,
A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
}
else {
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
CHECK( magma_cmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
CHECK( magma_cmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue ));
CHECK( magma_cmcsrcompressor_gpu( &dA, queue ));
magma_cmfree( &dA, queue );
magma_cmfree( A, queue );
CHECK( magma_cmtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
CHECK( magma_cmconvert( CSRA, A, Magma_CSR, A_storage, queue ));
magma_cmfree( &dA, queue );
magma_cmfree( &CSRA, queue );
}
cleanup:
magma_cmfree( &dA, queue );
magma_cmfree( &CSRA, queue );
magma_free( B2.drow );
magma_free( B.drow );
return info;
}
| 66eef0b0f94ed7b1069a71bad1cff149e2ef070f.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/magma_zmcsrcompressor_gpu.cu, normal z -> c, Thu Oct 8 23:05:48 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
__global__ void
magma_cmcsrgpu_kernel1( int num_rows,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaFloatComplex zero = MAGMA_C_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
// this is not a correct rowpointer! this is the nnz in this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_cmcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr )
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
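// a single thread rebuilds the CSR row pointer as a serial prefix sum
// over the per-row nonzero counts stored in B_rowptr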
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_cmcsrgpu_kernel3( int num_rows,
magmaFloatComplex *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
magmaFloatComplex *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
magmaFloatComplex zero = MAGMA_C_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param[in,out]
A magma_c_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_caux
********************************************************************/
extern "C" magma_int_t
magma_cmcsrcompressor_gpu(
magma_c_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_c_matrix B={Magma_CSR}, B2={Magma_CSR};
magma_c_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
magma_index_t *cputmp = NULL;
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1, queue );
dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
// copying the nonzeros into B and write in B.drow how many there are
magma_cmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
magma_cmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue->cuda_stream() >>>
( A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1, queue );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
CHECK( magma_cmalloc( &B.dval, A->nnz ));
CHECK( magma_index_malloc( &B.dcol, A->nnz ));
// copy correct values back
magma_cmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue->cuda_stream() >>>
( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
}
else {
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
CHECK( magma_cmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
CHECK( magma_cmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue ));
CHECK( magma_cmcsrcompressor_gpu( &dA, queue ));
magma_cmfree( &dA, queue );
magma_cmfree( A, queue );
CHECK( magma_cmtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
CHECK( magma_cmconvert( CSRA, A, Magma_CSR, A_storage, queue ));
magma_cmfree( &dA, queue );
magma_cmfree( &CSRA, queue );
}
cleanup:
magma_cmfree( &dA, queue );
magma_cmfree( &CSRA, queue );
magma_free( B2.drow );
magma_free( B.drow );
return info;
}
|
07884268c902d67b7ba8717026246654a212251f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "head.h"
extern float *b, *ut, *Vt;
extern float *d_V1, *d_V2;
extern float *d_b, *d_ut, *d_Vt;
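// The kernels below appear to assemble the right-hand sides for the two ADI
// (alternating direction implicit) half-steps of a 2D diffusion solve:
// GPU_adi_x builds the RHS for the x-sweep of column j, GPU_adi_y for the
// y-sweep of row i, and GPU_getV1/GPU_getV2 scatter the tridiagonal solutions
// (ut/Vt) back into the grids. r is assumed to be the diffusion number
// defined in head.h.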
__global__ void GPU_adi_x(float *d_V1, float *d_b, int j){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np){
if(j==0){
d_b[i] = d_V1[i*Np+j] + r*(-d_V1[i*Np+j] + d_V1[i*Np+j+1]);
}else if(j==Np-1){
d_b[i] = d_V1[i*Np+j] + r*(d_V1[i*Np+j-1] - d_V1[i*Np+j]);
}else{
d_b[i] = d_V1[i*Np+j] + (r/2)*(d_V1[i*Np+j-1] - 2*d_V1[i*Np+j] + d_V1[i*Np+j+1]);
}
}
}
__global__ void GPU_getV2(float *d_V2, float *d_Vt, int j){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np){
d_V2[i*Np+j] = d_Vt[i];
}
}
__global__ void GPU_adi_y(float *d_V2, float *d_b, int i){
int j = blockDim.x * blockIdx.x + threadIdx.x;
if(j<Np){
if(i==0){
d_b[j] = d_V2[i*Np+j] + (r/2)*(-2*d_V2[i*Np+j] + 2*d_V2[(i+1)*Np+j]);
}else if(i==Np-1){
d_b[j] = d_V2[i*Np+j] + (r/2)*(2*d_V2[(i-1)*Np+j] - 2*d_V2[i*Np+j]);
}else{
d_b[j] = d_V2[i*Np+j] + (r/2)*(d_V2[(i-1)*Np+j] - 2*d_V2[i*Np+j] + d_V2[(i+1)*Np+j]);
}
}
}
__global__ void GPU_getV1(float *d_V1, float *d_ut, int i){
int j = blockDim.x * blockIdx.x + threadIdx.x;
if(j<Np){
d_V1[i*Np+j] = d_ut[j];
}
}
void ADI1(int j){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_adi_x), dim3(bpg), dim3(tpb), 0, 0, d_V1, d_b, j);
}
void ADI1_2(int j){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_getV2), dim3(bpg), dim3(tpb), 0, 0, d_V2, d_Vt, j);
}
void ADI2(int i){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_adi_y), dim3(bpg), dim3(tpb), 0, 0, d_V2, d_b, i);
}
void ADI2_2(int i){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
hipLaunchKernelGGL(( GPU_getV1), dim3(bpg), dim3(tpb), 0, 0, d_V1, d_ut, i);
}
void Send_to_D(){
size_t size;
size = Np*sizeof(float);
hipMemcpy(d_Vt, Vt, size, hipMemcpyHostToDevice);
hipMemcpy(d_ut, ut, size, hipMemcpyHostToDevice);
}
void Send_to_H(){
size_t size;
size = Np*sizeof(float);
hipMemcpy(b, d_b, size, hipMemcpyDeviceToHost);
}
| 07884268c902d67b7ba8717026246654a212251f.cu | #include "head.h"
extern float *b, *ut, *Vt;
extern float *d_V1, *d_V2;
extern float *d_b, *d_ut, *d_Vt;
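// The kernels below appear to assemble the right-hand sides for the two ADI
// (alternating direction implicit) half-steps of a 2D diffusion solve:
// GPU_adi_x builds the RHS for the x-sweep of column j, GPU_adi_y for the
// y-sweep of row i, and GPU_getV1/GPU_getV2 scatter the tridiagonal solutions
// (ut/Vt) back into the grids. r is assumed to be the diffusion number
// defined in head.h.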
__global__ void GPU_adi_x(float *d_V1, float *d_b, int j){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np){
if(j==0){
d_b[i] = d_V1[i*Np+j] + r*(-d_V1[i*Np+j] + d_V1[i*Np+j+1]);
}else if(j==Np-1){
d_b[i] = d_V1[i*Np+j] + r*(d_V1[i*Np+j-1] - d_V1[i*Np+j]);
}else{
d_b[i] = d_V1[i*Np+j] + (r/2)*(d_V1[i*Np+j-1] - 2*d_V1[i*Np+j] + d_V1[i*Np+j+1]);
}
}
}
__global__ void GPU_getV2(float *d_V2, float *d_Vt, int j){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<Np){
d_V2[i*Np+j] = d_Vt[i];
}
}
__global__ void GPU_adi_y(float *d_V2, float *d_b, int i){
int j = blockDim.x * blockIdx.x + threadIdx.x;
if(j<Np){
if(i==0){
d_b[j] = d_V2[i*Np+j] + (r/2)*(-2*d_V2[i*Np+j] + 2*d_V2[(i+1)*Np+j]);
}else if(i==Np-1){
d_b[j] = d_V2[i*Np+j] + (r/2)*(2*d_V2[(i-1)*Np+j] - 2*d_V2[i*Np+j]);
}else{
d_b[j] = d_V2[i*Np+j] + (r/2)*(d_V2[(i-1)*Np+j] - 2*d_V2[i*Np+j] + d_V2[(i+1)*Np+j]);
}
}
}
__global__ void GPU_getV1(float *d_V1, float *d_ut, int i){
int j = blockDim.x * blockIdx.x + threadIdx.x;
if(j<Np){
d_V1[i*Np+j] = d_ut[j];
}
}
void ADI1(int j){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
GPU_adi_x<<<bpg, tpb>>>(d_V1, d_b, j);
}
void ADI1_2(int j){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
GPU_getV2<<<bpg, tpb>>>(d_V2, d_Vt, j);
}
void ADI2(int i){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
GPU_adi_y<<<bpg, tpb>>>(d_V2, d_b, i);
}
void ADI2_2(int i){
int tpb = 256;
int bpg = (Np+tpb-1)/tpb;
GPU_getV1<<<bpg, tpb>>>(d_V1, d_ut, i);
}
void Send_to_D(){
size_t size;
size = Np*sizeof(float);
cudaMemcpy(d_Vt, Vt, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_ut, ut, size, cudaMemcpyHostToDevice);
}
void Send_to_H(){
size_t size;
size = Np*sizeof(float);
cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
}
|
9be6c3571ee5aab346b9f534c4609ff4e41c8fdb.hip | // !!! This is a file automatically generated by hipify!!!
//
// Assignment 3 - The threads in a warp compute successive cross-correlation points
// Rafael Sá, 104552, [email protected]
// Luís Laranjeira, 81526, [email protected]
//
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include <hip/hip_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
static void compute_CC_cpu_kernel(int n, double *x, double *y, double *results);
__global__ static void computeCC_cuda_kernel(int n, double *x_h, double *h_y, double *results);
static double get_delta_time(void);
static void generate_samples(double *m, int N);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// create memory areas in host and device memory where the signal samples and the cross-correlation results will be stored
int n = 1 << 16;
double *h_x, *h_y, *result_cuda, *result_cpu;
int nBytes;
nBytes = n * sizeof(double); //Storage space in bytes
h_x = (double *)malloc(nBytes);
h_y = (double *)malloc(nBytes);
result_cuda = (double *)malloc(nBytes);
result_cpu = (double *)malloc(nBytes);
//generate samples for x and y
(void)get_delta_time();
generate_samples(h_x, n);
generate_samples(h_y, n);
printf("Samples for signals x and y generated on %.3e seconds\n", get_delta_time());
//reserve memory for gpu
double *d_x, *d_y, *d_results;
CHECK(hipMalloc((void **)&d_x, nBytes));
CHECK(hipMalloc((void **)&d_y, nBytes));
CHECK(hipMalloc((void **)&d_results, nBytes));
// copy the host data to the device memory
(void)get_delta_time();
//copy to gpu
CHECK(hipMemcpy(d_x, h_x, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_y, h_y, nBytes, hipMemcpyHostToDevice));
printf("The transfer of %d bytes from the host to the device took %.3e seconds\n",
2 * nBytes, get_delta_time());
// run the computational kernel
// as an example, n threads are launched where each thread deals with one point
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
blockDimX = 1 << 8; // optimize!
blockDimY = 1; // optimize!
blockDimZ = 1; // do not change!
gridDimX = 1 << 8; // optimize!
gridDimY = 1; // optimize!
gridDimZ = 1; // do not change!
dim3 grid(gridDimX, gridDimY, gridDimZ);
dim3 block(blockDimX, blockDimY, blockDimZ);
if ((gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ) != n)
{
printf("%d\n",(gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ));
printf("%d\n",n);
printf("Wrong configuration!\n");
return 1;
}
(void)get_delta_time();
hipLaunchKernelGGL(( computeCC_cuda_kernel), dim3(grid), dim3(block), 0, 0, n, d_x, d_y, d_results);
CHECK(hipDeviceSynchronize()); // wait for kernel to finish
CHECK(hipGetLastError()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time());
// copy kernel result back to host side
CHECK(hipMemcpy(result_cuda, d_results, nBytes, hipMemcpyDeviceToHost));
printf("The transfer of %d bytes from the device to the host took %.3e seconds\n",
nBytes, get_delta_time());
// free device global memory
CHECK(hipFree(d_x)); //gpu
CHECK(hipFree(d_y));
CHECK(hipFree(d_results)); //gpu
// reset device
CHECK(hipDeviceReset());
// compute the CC on the CPU
(void)get_delta_time();
compute_CC_cpu_kernel(n, h_x, h_y, result_cpu);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time());
// compare
size_t i;
for (i = 0; i < n; i++)
if((fabs(result_cuda[i] - result_cpu[i]) >= 1e-6) &&
((fabs(result_cuda[i]) < 1e-6 ) || (((result_cuda[i] - result_cpu[i]) / result_cuda[i]) >= 1e-6)))
{
printf("Mismatch in point %zu, expected %f.\n", i, result_cpu[i]);
exit(1);
}
printf("All is well!\n");
// free host memory
free(h_x); //cpu
free(h_y);
free(result_cuda);
free(result_cpu);
return 0;
}
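// Both kernels below evaluate the circular cross-correlation
// results[p] = sum_{i=0..n-1} x[i] * y[(p+i) mod n] for every lag p in [0, n).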
static void compute_CC_cpu_kernel(int n, double *x, double *y, double *results)
{
unsigned int point, i;
double sum;
for (point = 0; point < n; point++)
{
sum = 0.0;
for (i = 0; i < n; i++)
{
sum += x[i] * y[(point + i) % n];
}
results[point] = sum;
}
}
__global__ static void computeCC_cuda_kernel(int n, double *x_h, double *h_y, double *results)
{
unsigned int x, y, idx, i;
double sum;
// compute the thread number
x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x;
y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y;
idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x;
sum = 0.0;
for (i = 0; i < n; i++)
{
sum += x_h[i] * h_y[(idx + i) % n];
}
results[idx] = sum;
}
static double get_delta_time(void)
{
static struct timespec t0, t1;
t0 = t1;
if (clock_gettime(CLOCK_MONOTONIC, &t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
static void generate_samples(double *m, int N)
{
size_t i;
double lower = -0.5;
double upper = 0.5;
for (i = 0; i < N; i++)
{
m[i] = ((double)rand() * (upper - lower)) / (double)RAND_MAX + lower;
}
}
| 9be6c3571ee5aab346b9f534c4609ff4e41c8fdb.cu | //
// Assignment 3 - The threads in a warp compute successive cross-correlation points
// Rafael Sá, 104552, [email protected]
// Luís Laranjeira, 81526, [email protected]
//
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "common.h"
#include <cuda_runtime.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// program configuration
//
static void compute_CC_cpu_kernel(int n, double *x, double *y, double *results);
__global__ static void computeCC_cuda_kernel(int n, double *x_h, double *h_y, double *results);
static double get_delta_time(void);
static void generate_samples(double *m, int N);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main program
//
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// create memory areas in host and device memory where the signal samples and the cross-correlation results will be stored
int n = 1 << 16;
double *h_x, *h_y, *result_cuda, *result_cpu;
int nBytes;
nBytes = n * sizeof(double); //Storage space in bytes
h_x = (double *)malloc(nBytes);
h_y = (double *)malloc(nBytes);
result_cuda = (double *)malloc(nBytes);
result_cpu = (double *)malloc(nBytes);
//generate samples for x and y
(void)get_delta_time();
generate_samples(h_x, n);
generate_samples(h_y, n);
printf("Samples for signals x and y generated on %.3e seconds\n", get_delta_time());
//reserve memory for gpu
double *d_x, *d_y, *d_results;
CHECK(cudaMalloc((void **)&d_x, nBytes));
CHECK(cudaMalloc((void **)&d_y, nBytes));
CHECK(cudaMalloc((void **)&d_results, nBytes));
// copy the host data to the device memory
(void)get_delta_time();
//copy to gpu
CHECK(cudaMemcpy(d_x, h_x, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_y, h_y, nBytes, cudaMemcpyHostToDevice));
printf("The transfer of %d bytes from the host to the device took %.3e seconds\n",
2 * nBytes, get_delta_time());
// run the computational kernel
// as an example, n threads are launched where each thread deals with one point
unsigned int gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ;
blockDimX = 1 << 8; // optimize!
blockDimY = 1; // optimize!
blockDimZ = 1; // do not change!
gridDimX = 1 << 8; // optimize!
gridDimY = 1; // optimize!
gridDimZ = 1; // do not change!
dim3 grid(gridDimX, gridDimY, gridDimZ);
dim3 block(blockDimX, blockDimY, blockDimZ);
if ((gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ) != n)
{
printf("%d\n",(gridDimX * gridDimY * gridDimZ * blockDimX * blockDimY * blockDimZ));
printf("%d\n",n);
printf("Wrong configuration!\n");
return 1;
}
(void)get_delta_time();
computeCC_cuda_kernel<<<grid, block>>>(n, d_x, d_y, d_results);
CHECK(cudaDeviceSynchronize()); // wait for kernel to finish
CHECK(cudaGetLastError()); // check for kernel errors
printf("The CUDA kernel <<<(%d,%d,%d), (%d,%d,%d)>>> took %.3e seconds to run\n",
gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, get_delta_time());
// copy kernel result back to host side
CHECK(cudaMemcpy(result_cuda, d_results, nBytes, cudaMemcpyDeviceToHost));
printf("The transfer of %d bytes from the device to the host took %.3e seconds\n",
nBytes, get_delta_time());
// free device global memory
CHECK(cudaFree(d_x)); //gpu
CHECK(cudaFree(d_y));
CHECK(cudaFree(d_results)); //gpu
// reset device
CHECK(cudaDeviceReset());
// compute the CC on the CPU
(void)get_delta_time();
compute_CC_cpu_kernel(n, h_x, h_y, result_cpu);
printf("The cpu kernel took %.3e seconds to run (single core)\n", get_delta_time());
// compare
size_t i;
for (i = 0; i < n; i++)
if((fabs(result_cuda[i] - result_cpu[i]) >= 1e-6) &&
((fabs(result_cuda[i]) < 1e-6 ) || (((result_cuda[i] - result_cpu[i]) / result_cuda[i]) >= 1e-6)))
{
printf("Mismatch in point %zu, expected %f.\n", i, result_cpu[i]);
exit(1);
}
printf("All is well!\n");
// free host memory
free(h_x); //cpu
free(h_y);
free(result_cuda);
free(result_cpu);
return 0;
}
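// Both kernels below evaluate the circular cross-correlation
// results[p] = sum_{i=0..n-1} x[i] * y[(p+i) mod n] for every lag p in [0, n).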
static void compute_CC_cpu_kernel(int n, double *x, double *y, double *results)
{
unsigned int point, i;
double sum;
for (point = 0; point < n; point++)
{
sum = 0.0;
for (i = 0; i < n; i++)
{
sum += x[i] * y[(point + i) % n];
}
results[point] = sum;
}
}
__global__ static void computeCC_cuda_kernel(int n, double *x_h, double *h_y, double *results)
{
unsigned int x, y, idx, i;
double sum;
// compute the thread number
x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x;
y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y;
idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x;
sum = 0.0;
for (i = 0; i < n; i++)
{
sum += x_h[i] * h_y[(idx + i) % n];
}
results[idx] = sum;
}
static double get_delta_time(void)
{
static struct timespec t0, t1;
t0 = t1;
if (clock_gettime(CLOCK_MONOTONIC, &t1) != 0)
{
perror("clock_gettime");
exit(1);
}
return (double)(t1.tv_sec - t0.tv_sec) + 1.0e-9 * (double)(t1.tv_nsec - t0.tv_nsec);
}
static void generate_samples(double *m, int N)
{
size_t i;
double lower = -0.5;
double upper = 0.5;
for (i = 0; i < N; i++)
{
m[i] = ((double)rand() * (upper - lower)) / (double)RAND_MAX + lower;
}
}
|
3f54e0d1767009a0742a663244be7956a56a8719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define TILE_WIDTH 8
float *host_xpin, *host_ypin, *host_kpin;
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch.
The goal here is to be correct AND fast.
Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int b = blockIdx.x;
int m = blockIdx.y;
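// blockIdx.z linearizes the 2D grid of output tiles: z / W_grid is the tile row
// and z % W_grid is the tile column, each tile covering TILE_WIDTH x TILE_WIDTH
// output pixels.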
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
if (h < H_out && w < W_out) {
float acc = 0.;
for (int c = 0; c < C; c++) {
for (int p = 0; p < K; p++) {
for (int q = 0; q < K; q++) {
acc += x4d(b, c, h+p, w+q) * k4d(m, c, p, q);
}
}
}
y4d(b, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
const int H_out = H - K + 1;
const int W_out = W - K + 1;
hipMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
hipMalloc((void **) device_k_ptr, M * C * K * K * sizeof(float));
hipMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
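// Stage the transfers through pinned (page-locked) host buffers; memory from
// hipHostMalloc lets the copies use DMA and typically speeds up H2D/D2H traffic.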
hipHostMalloc((void **) &host_xpin, B * C * W * H * sizeof(float));
hipHostMalloc((void **) &host_kpin, M * C * K * K * sizeof(float));
hipHostMalloc((void **) &host_ypin, B * M * W_out * H_out * sizeof(float));
memcpy(host_xpin, host_x, B * C * W * H * sizeof(float));
memcpy(host_kpin, host_k, M * C * K * K * sizeof(float));
memset(host_ypin, 0, B * M * W_out * H_out * sizeof(float));
hipMemcpy(*device_x_ptr, host_xpin, B * C * W * H * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(*device_k_ptr, host_kpin, M * C * K * K * sizeof(float), hipMemcpyHostToDevice);
// Useful snippet for error checking
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
std::cout<<"CUDA error: "<<hipGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
hipLaunchKernelGGL(( conv_forward_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
hipMemcpy(host_ypin, device_y, B * M * W_out * H_out * sizeof(float), hipMemcpyDeviceToHost);
memcpy(host_y, host_ypin, B * M * W_out * H_out * sizeof(float));
// Free device memory
hipFree(device_x);
hipFree(device_y);
hipFree(device_k);
hipHostFree(host_xpin);
hipHostFree(host_ypin);
hipHostFree(host_kpin);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
| 3f54e0d1767009a0742a663244be7956a56a8719.cu | #include <cmath>
#include <iostream>
#include "gpu-new-forward.h"
#define TILE_WIDTH 8
float *host_xpin, *host_ypin, *host_kpin;
__global__ void conv_forward_kernel(float *y, const float *x, const float *k, const int B, const int M, const int C, const int H, const int W, const int K)
{
/*
Modify this function to implement the forward pass described in Chapter 16.
We have added an additional dimension to the tensors to support an entire mini-batch.
The goal here is to be correct AND fast.
Function parameter definitions:
y - output
x - input
k - kernel
B - batch_size (number of images in x)
M - number of output feature maps
C - number of input feature maps
H - input height dimension
W - input width dimension
K - kernel height and width (K x K)
*/
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
// We have some nice #defs for you below to simplify indexing. Feel free to use them, or create your own.
// An example use of these macros:
// float a = y4d(0,0,0,0)
// y4d(0,0,0,0) = a
#define y4d(i3, i2, i1, i0) y[(i3) * (M * H_out * W_out) + (i2) * (H_out * W_out) + (i1) * (W_out) + i0]
#define x4d(i3, i2, i1, i0) x[(i3) * (C * H * W) + (i2) * (H * W) + (i1) * (W) + i0]
#define k4d(i3, i2, i1, i0) k[(i3) * (C * K * K) + (i2) * (K * K) + (i1) * (K) + i0]
// Insert your GPU convolution kernel code here
int b = blockIdx.x;
int m = blockIdx.y;
int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
if (h < H_out && w < W_out) {
float acc = 0.;
for (int c = 0; c < C; c++) {
for (int p = 0; p < K; p++) {
for (int q = 0; q < K; q++) {
acc += x4d(b, c, h+p, w+q) * k4d(m, c, p, q);
}
}
}
y4d(b, m, h, w) = acc;
}
#undef y4d
#undef x4d
#undef k4d
}
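// Illustrative note (added, not part of the original source): the y4d/x4d/k4d macros
// flatten a 4D index into the 1D buffers. For example, with M = 4 output feature maps
// and a 24x24 output plane (H_out = W_out = 24), element y[b][m][h][w] lives at linear
// offset b*(4*24*24) + m*(24*24) + h*24 + w, which is exactly what y4d(b, m, h, w)
// expands to.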
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_y, const float *host_x, const float *host_k, float **device_y_ptr, float **device_x_ptr, float **device_k_ptr, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Allocate memory and copy over the relevant data structures to the GPU
// We pass double pointers for you to initialize the relevant device pointers,
// which are passed to the other two functions.
const int H_out = H - K + 1;
const int W_out = W - K + 1;
cudaMalloc((void **) device_x_ptr, B * C * W * H * sizeof(float));
cudaMalloc((void **) device_k_ptr, M * C * K * K * sizeof(float));
cudaMalloc((void **) device_y_ptr, B * M * W_out * H_out * sizeof(float));
cudaMallocHost((void **) &host_xpin, B * C * W * H * sizeof(float));
cudaMallocHost((void **) &host_kpin, M * C * K * K * sizeof(float));
cudaMallocHost((void **) &host_ypin, B * M * W_out * H_out * sizeof(float));
memcpy(host_xpin, host_x, B * C * W * H * sizeof(float));
memcpy(host_kpin, host_k, M * C * K * K * sizeof(float));
memset(host_ypin, 0, B * M * W_out * H_out * sizeof(float));
cudaMemcpy(*device_x_ptr, host_xpin, B * C * W * H * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(*device_k_ptr, host_kpin, M * C * K * K * sizeof(float), cudaMemcpyHostToDevice);
// Useful snippet for error checking
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
std::cout<<"CUDA error: "<<cudaGetErrorString(error)<<std::endl;
exit(-1);
}
}
__host__ void GPUInterface::conv_forward_gpu(float *device_y, const float *device_x, const float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
// Set the kernel dimensions and call the kernel
const int H_out = H - K + 1;
const int W_out = W - K + 1;
const int W_grid = ceil(1.0 * W_out / TILE_WIDTH);
const int H_grid = ceil(1.0 * H_out / TILE_WIDTH);
const int Z = W_grid * H_grid;
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
dim3 dimGrid(B, M, Z);
conv_forward_kernel<<<dimGrid, dimBlock>>>(device_y, device_x, device_k, B, M, C, H, W, K);
}
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_y, float *device_y, float *device_x, float *device_k, const int B, const int M, const int C, const int H, const int W, const int K)
{
const int H_out = H - K + 1;
const int W_out = W - K + 1;
// Copy the output back to host
cudaMemcpy(host_ypin, device_y, B * M * W_out * H_out * sizeof(float), cudaMemcpyDeviceToHost);
memcpy(host_y, host_ypin, B * M * W_out * H_out * sizeof(float));
// Free device memory
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_k);
cudaFreeHost(host_xpin);
cudaFreeHost(host_ypin);
cudaFreeHost(host_kpin);
}
__host__ void GPUInterface::get_device_properties()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int dev = 0; dev < deviceCount; dev++)
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
std::cout<<"Device "<<dev<<" name: "<<deviceProp.name<<std::endl;
std::cout<<"Computational capabilities: "<<deviceProp.major<<"."<<deviceProp.minor<<std::endl;
std::cout<<"Max Global memory size: "<<deviceProp.totalGlobalMem<<std::endl;
std::cout<<"Max Constant memory size: "<<deviceProp.totalConstMem<<std::endl;
std::cout<<"Max Shared memory size per block: "<<deviceProp.sharedMemPerBlock<<std::endl;
std::cout<<"Max threads per block: "<<deviceProp.maxThreadsPerBlock<<std::endl;
std::cout<<"Max block dimensions: "<<deviceProp.maxThreadsDim[0]<<" x, "<<deviceProp.maxThreadsDim[1]<<" y, "<<deviceProp.maxThreadsDim[2]<<" z"<<std::endl;
std::cout<<"Max grid dimensions: "<<deviceProp.maxGridSize[0]<<" x, "<<deviceProp.maxGridSize[1]<<" y, "<<deviceProp.maxGridSize[2]<<" z"<<std::endl;
std::cout<<"Warp Size: "<<deviceProp.warpSize<<std::endl;
}
}
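/* Usage sketch (added for clarity; not part of the original file). It shows the intended
call order of the three GPUInterface hooks defined above. The buffer names and the way
GPUInterface is instantiated here are assumptions for demonstration only.
GPUInterface gpu;
float *d_y, *d_x, *d_k;
// 1. copy the host tensors to the device (also stages them through pinned buffers)
gpu.conv_forward_gpu_prolog(h_y, h_x, h_k, &d_y, &d_x, &d_k, B, M, C, H, W, K);
// 2. launch the convolution kernel
gpu.conv_forward_gpu(d_y, d_x, d_k, B, M, C, H, W, K);
// 3. copy the result back and release device and pinned memory
gpu.conv_forward_gpu_epilog(h_y, d_y, d_x, d_k, B, M, C, H, W, K);
*/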
|
7422d85b65826215bf3553fb175b2f2ac8be46aa.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2011 Andreas Muetzel ([email protected]). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#include "kdtree_cuda_3d_index.h"
#include <flann/algorithms/dist.h>
#include <flann/util/cuda/result_set.h>
// #define THRUST_DEBUG 1
#include <hip/hip_runtime.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <hip/hip_vector_types.h>
#include <flann/util/cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <flann/util/cuda/heap.h>
#include <thrust/scan.h>
#include <thrust/count.h>
#include <flann/algorithms/kdtree_cuda_builder.h>
#include <hip/hip_vector_types.h>
namespace flann
{
namespace KdTreeCudaPrivate
{
template< typename GPUResultSet, typename Distance >
__device__
void searchNeighbors(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbLow,
const float4* aabbHigh, const float4* elements, const float4& q, GPUResultSet& result, const Distance& distance = Distance() )
{
bool backtrack=false;
int lastNode=-1;
int current=0;
cuda::kd_tree_builder_detail::SplitInfo split;
while(true) {
if( current==-1 ) break;
split = splits[current];
float diff1;
if( split.split_dim==0 ) diff1=q.x- split.split_val;
else if( split.split_dim==1 ) diff1=q.y- split.split_val;
else if( split.split_dim==2 ) diff1=q.z- split.split_val;
// children are next to each other: leftChild+1 == rightChild
int leftChild= child1[current];
int bestChild=leftChild;
int otherChild=leftChild;
if ((diff1)<0) {
otherChild++;
}
else {
bestChild++;
}
if( !backtrack ) {
/* If this is a leaf node, then do check and return. */
if (leftChild==-1) {
for (int i=split.left; i<split.right; ++i) {
float dist=distance.dist(elements[i],q);
result.insert(i,dist);
}
backtrack=true;
lastNode=current;
current=parent[current];
}
else { // go to closer child node
lastNode=current;
current=bestChild;
}
}
else { // continue moving back up the tree or visit far node?
// minimum possible distance between query point and a point inside the AABB
float mindistsq=0;
float4 aabbMin=aabbLow[otherChild];
float4 aabbMax=aabbHigh[otherChild];
if( q.x < aabbMin.x ) mindistsq+=distance.axisDist(q.x, aabbMin.x);
else if( q.x > aabbMax.x ) mindistsq+=distance.axisDist(q.x, aabbMax.x);
if( q.y < aabbMin.y ) mindistsq+=distance.axisDist(q.y, aabbMin.y);
else if( q.y > aabbMax.y ) mindistsq+=distance.axisDist(q.y, aabbMax.y);
if( q.z < aabbMin.z ) mindistsq+=distance.axisDist(q.z, aabbMin.z);
else if( q.z > aabbMax.z ) mindistsq+=distance.axisDist(q.z, aabbMax.z);
// the far node was NOT the last node (== not visited yet) AND there could be a closer point in it
if(( lastNode==bestChild) && (mindistsq <= result.worstDist() ) ) {
lastNode=current;
current=otherChild;
backtrack=false;
}
else {
lastNode=current;
current=parent[current];
}
}
}
}
template< typename GPUResultSet, typename Distance >
__global__
void nearestKernel(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbMin,
const float4* aabbMax, const float4* elements, const float* query, int stride, int resultStride, int* resultIndex, float* resultDist, int querysize, GPUResultSet result, Distance dist = Distance())
{
typedef float DistanceType;
typedef float ElementType;
// typedef DistanceType float;
size_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if( tid >= querysize ) return;
float4 q = make_float4(query[tid*stride],query[tid*stride+1],query[tid*stride+2],0);
result.setResultLocation( resultDist, resultIndex, tid, resultStride );
searchNeighbors(splits,child1,parent,aabbMin,aabbMax,elements, q, result, dist);
result.finish();
}
}
//! contains some pointers that use cuda data types and that cannot be easily
//! forward-declared.
//! basically it contains all GPU buffers
template<typename Distance>
struct KDTreeCuda3dIndex<Distance>::GpuHelper
{
thrust::device_vector< cuda::kd_tree_builder_detail::SplitInfo >* gpu_splits_;
thrust::device_vector< int >* gpu_parent_;
thrust::device_vector< int >* gpu_child1_;
thrust::device_vector< float4 >* gpu_aabb_min_;
thrust::device_vector< float4 >* gpu_aabb_max_;
thrust::device_vector<float4>* gpu_points_;
thrust::device_vector<int>* gpu_vind_;
GpuHelper() : gpu_splits_(0), gpu_parent_(0), gpu_child1_(0), gpu_aabb_min_(0), gpu_aabb_max_(0), gpu_points_(0), gpu_vind_(0){
}
~GpuHelper()
{
delete gpu_splits_;
gpu_splits_=0;
delete gpu_parent_;
gpu_parent_=0;
delete gpu_child1_;
gpu_child1_=0;
delete gpu_aabb_max_;
gpu_aabb_max_=0;
delete gpu_aabb_min_;
gpu_aabb_min_=0;
delete gpu_vind_;
gpu_vind_=0;
delete gpu_points_;
gpu_points_=0;
}
};
//! thrust transform functor
//! transforms indices in the internal data set back to the original indices
struct map_indices
{
const int* v_;
map_indices(const int* v) : v_(v) {
}
__host__ __device__
float operator() (const int&i) const
{
if( i>= 0 ) return v_[i];
else return i;
}
};
//! implementation of L2 distance for the CUDA kernels
struct CudaL2
{
static float
__host__ __device__
axisDist( float a, float b )
{
return (a-b)*(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
float4 diff = a-b;
return dot(diff,diff);
}
};
//! implementation of L1 distance for the CUDA kernels
//! NOT TESTED!
struct CudaL1
{
static float
__host__ __device__
axisDist( float a, float b )
{
return fabs(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
return fabs(a.x-b.x)+fabs(a.y-b.y)+fabs(a.z-b.z)+fabs(a.w-b.w);
}
};
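// Clarifying note (added, not part of the original source): CudaL2::dist returns the
// squared Euclidean distance (no square root is taken), and CudaL1::dist the sum of
// absolute coordinate differences. For example,
// CudaL2::dist(make_float4(1,2,3,0), make_float4(4,6,3,0)) yields 3*3 + 4*4 = 25.
// The radius and worstDist() comparisons in the kernels above operate on these raw
// values, so for L2 a caller's search radius is expected in that same squared space.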
//! used to adapt CPU and GPU distance types.
//! specializations define the ::type as their corresponding GPU distance type
//! \see GpuDistance< L2<float> >, GpuDistance< L2_Simple<float> >
template< class Distance >
struct GpuDistance
{
};
template<>
struct GpuDistance< L2<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L2_Simple<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L1<float> >
{
typedef CudaL1 type;
};
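// Example (added for illustration, not part of the original source): the trait maps a
// CPU-side flann distance type to its GPU functor, e.g.
// typedef GpuDistance< flann::L2<float> >::type GpuDist; // resolves to CudaL2
// which is how knnSearchGpu/radiusSearchGpu below pick the functor passed to the
// kernels. A distance type without a specialization here fails to compile, since the
// primary template defines no ::type.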
template< typename Distance >
void KDTreeCuda3dIndex<Distance>::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params)
{
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(int(indices.cols) >= knn);
assert( dists.cols == indices.cols && dists.stride==indices.stride );
bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
float epsError = 1+get_param(params,"eps",0.0f);
bool sorted = get_param(params,"sorted",true);
bool use_heap = get_param(params,"use_heap",false);
typename GpuDistance<Distance>::type distance;
// std::cout<<" search: "<<std::endl;
// std::cout<<" rows: "<<indices.rows<<" "<<dists.rows<<" "<<queries.rows<<std::endl;
// std::cout<<" cols: "<<indices.cols<<" "<<dists.cols<<" "<<queries.cols<<std::endl;
// std::cout<<" stride: "<<indices.stride<<" "<<dists.stride<<" "<<queries.stride<<std::endl;
// std::cout<<" knn:"<<knn<<" matrices_on_gpu:"<<matrices_on_gpu<<std::endl;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
thrust::device_vector<float> distsDev(queries.rows* dists.stride);
thrust::device_vector<int> indicesDev(queries.rows* dists.stride);
if( knn==1 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.data );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
}
else {
thrust::device_ptr<float> qd = thrust::device_pointer_cast(queries.data);
thrust::device_ptr<float> dd = thrust::device_pointer_cast(dists.data);
thrust::device_ptr<int> id = thrust::device_pointer_cast(indices.data);
if( knn==1 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::transform(id, id+knn*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
}
}
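/* Usage sketch (added for clarity; not part of the original source). A host-side caller
would wrap contiguous buffers in flann::Matrix views and pass them in. The names and
sizes below are assumptions for demonstration only, and the exact Matrix constructor /
stride convention should be checked against flann's matrix.h; `index` is assumed to be
an already-built KDTreeCuda3dIndex over the same point cloud.
std::vector<float> q(n_queries * 3); // one 3D point per query row
std::vector<int> idx(n_queries * knn);
std::vector<float> d(n_queries * knn);
flann::Matrix<float> queries(q.data(), n_queries, 3);
flann::Matrix<int> indices(idx.data(), n_queries, knn);
flann::Matrix<float> dists(d.data(), n_queries, knn);
index.knnSearchGpu(queries, indices, dists, knn, flann::SearchParams());
*/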
template< typename Distance>
int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params)
{
// assert(indices.rows >= queries.rows);
// assert(dists.rows >= queries.rows);
int max_neighbors = get_param(params, "max_neighbors", -1);
bool sorted = get_param(params, "sorted", true);
bool use_heap = get_param(params, "use_heap", false);
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
thrust::device_vector<int> countsDev(queries.rows);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&countsDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,max_neighbors),
distance
);
thrust::host_vector<int> counts_host=countsDev;
if( max_neighbors!=0 ) { // we'll need this later, but the exclusive_scan will change the array
for( size_t i=0; i<queries.rows; i++ ) {
int count = counts_host[i];
if( count > 0 ) {
indices[i].resize(count);
dists[i].resize(count);
}
else {
indices[i].clear();
dists[i].clear();
}
}
}
int neighbors_last_elem = countsDev.back();
thrust::exclusive_scan( countsDev.begin(), countsDev.end(), countsDev.begin() );
size_t total_neighbors=neighbors_last_elem+countsDev.back();
if( max_neighbors==0 ) return total_neighbors;
thrust::device_vector<int> indicesDev(total_neighbors,-1);
thrust::device_vector<float> distsDev(total_neighbors,std::numeric_limits<float>::infinity());
if( max_neighbors<0 ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
else {
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
}
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::host_vector<int> indices_temp = indicesDev;
thrust::host_vector<float> dists_temp = distsDev;
int buffer_index=0;
for( size_t i=0; i<queries.rows; i++ ) {
for( size_t j=0; j<counts_host[i]; j++ ) {
dists[i][j]=dists_temp[buffer_index];
indices[i][j]=indices_temp[buffer_index];
++buffer_index;
}
}
return buffer_index;
}
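// Note on the two-pass scheme above (added for clarity, not part of the original
// source): the first kernel launch only counts neighbors per query, then
// thrust::exclusive_scan turns the counts into output offsets. For example, per-query
// counts [2, 0, 3] become offsets [0, 2, 2]; together with the saved last count (3)
// this gives total_neighbors = 5, which sizes indicesDev/distsDev for the second
// launch that writes the actual results at those offsets.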
//! used in the radius search to count the total number of neighbors
struct isNotMinusOne
{
__host__ __device__
bool operator() ( int i ){
return i!=-1;
}
};
template< typename Distance>
int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
{
int max_neighbors = get_param(params, "max_neighbors", -1);
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows || max_neighbors==0 );
assert(indices.stride==dists.stride || max_neighbors==0 );
assert( indices.cols==indices.stride );
assert(dists.rows >= queries.rows || max_neighbors==0 );
bool sorted = get_param(params, "sorted", true);
bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
float epsError = 1+get_param(params,"eps",0.0f);
bool use_heap = get_param(params,"use_heap",false);
if( max_neighbors<0 ) max_neighbors=indices.cols;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
indices.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
thrust::device_vector<float> distsDev(queries.rows* max_neighbors);
thrust::device_vector<int> indicesDev(queries.rows* max_neighbors);
// bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.data );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::count_if(indicesDev.begin(), indicesDev.end(), isNotMinusOne() );
}
else {
thrust::device_ptr<float> qd=thrust::device_pointer_cast(queries.data);
thrust::device_ptr<float> dd=thrust::device_pointer_cast(dists.data);
thrust::device_ptr<int> id=thrust::device_pointer_cast(indices.data);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
indices.stride,
id.get(),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
// bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
hipLaunchKernelGGL(( KdTreeCudaPrivate::nearestKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::transform(id, id+max_neighbors*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
return thrust::count_if(id, id+max_neighbors*queries.rows, isNotMinusOne() );
}
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::uploadTreeToGpu()
{
// just make sure that no weird alignment stuff is going on...
// shouldn't, but who knows
// (I would make this a (boost) static assertion, but so far flann seems to avoid boost)
// assert( sizeof( KdTreeCudaPrivate::GpuNode)==sizeof( Node ) );
delete gpu_helper_;
gpu_helper_ = new GpuHelper;
gpu_helper_->gpu_points_=new thrust::device_vector<float4>(size_);
thrust::device_vector<float4> tmp(size_);
if( get_param(index_params_,"input_is_gpu_float4",false) ) {
assert( dataset_.cols == 3 && dataset_.stride==4);
thrust::copy( thrust::device_pointer_cast((float4*)dataset_.data),thrust::device_pointer_cast((float4*)(dataset_.data))+size_,tmp.begin());
}
else {
// k is limited to 4 -> use 128bit-alignment regardless of dimensionality
// makes cpu search about 5% slower, but gpu can read a float4 w/ a single instruction
// (vs a float2 and a float load for a float3 value)
// pad data directly to avoid having to copy and re-format the data when
// copying it to the GPU
data_ = flann::Matrix<ElementType>(new ElementType[size_*4], size_, dim_,4);
for (size_t i=0; i<size_; ++i) {
for (size_t j=0; j<dim_; ++j) {
data_[i][j] = dataset_[i][j];
}
for (size_t j=dim_; j<4; ++j) {
data_[i][j] = 0;
}
}
thrust::copy((float4*)data_.data,(float4*)(data_.data)+size_,tmp.begin());
}
CudaKdTreeBuilder builder( tmp, leaf_max_size_ );
builder.buildTree();
gpu_helper_->gpu_splits_ = builder.splits_;
gpu_helper_->gpu_aabb_min_ = builder.aabb_min_;
gpu_helper_->gpu_aabb_max_ = builder.aabb_max_;
gpu_helper_->gpu_child1_ = builder.child1_;
gpu_helper_->gpu_parent_=builder.parent_;
gpu_helper_->gpu_vind_=builder.index_x_;
thrust::gather( builder.index_x_->begin(), builder.index_x_->end(), tmp.begin(), gpu_helper_->gpu_points_->begin());
// gpu_helper_->gpu_nodes_=new thrust::device_vector<KdTreeCudaPrivate::GpuNode>(node_count_);
// gpu_helper_->gpu_vind_=new thrust::device_vector<int>(size_);
// thrust::copy( (KdTreeCudaPrivate::GpuNode*)&(tree_[0]), ((KdTreeCudaPrivate::GpuNode*)&(tree_[0]))+tree_.size(), gpu_helper_->gpu_nodes_->begin());
// thrust::copy(vind_.begin(),vind_.end(),gpu_helper_->gpu_vind_->begin());
// buildGpuTree();
}
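// Example of the padding performed above (added note, not part of the original source):
// a 3D input point (x, y, z) is stored on the GPU as float4(x, y, z, 0), so a dataset
// with dim_ == 3 and stride 4 maps directly onto gpu_points_ and a single 128-bit load
// fetches a whole point inside the kernels.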
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::clearGpuBuffers()
{
delete gpu_helper_;
gpu_helper_=0;
}
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2_Simple<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L1<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L1<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L1<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L1<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
}
| 7422d85b65826215bf3553fb175b2f2ac8be46aa.cu | /**********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2011 Andreas Muetzel ([email protected]). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#include "kdtree_cuda_3d_index.h"
#include <flann/algorithms/dist.h>
#include <flann/util/cuda/result_set.h>
// #define THRUST_DEBUG 1
#include <cuda.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <vector_types.h>
#include <flann/util/cutil_math.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <flann/util/cuda/heap.h>
#include <thrust/scan.h>
#include <thrust/count.h>
#include <flann/algorithms/kdtree_cuda_builder.h>
#include <vector_types.h>
namespace flann
{
namespace KdTreeCudaPrivate
{
template< typename GPUResultSet, typename Distance >
__device__
void searchNeighbors(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbLow,
const float4* aabbHigh, const float4* elements, const float4& q, GPUResultSet& result, const Distance& distance = Distance() )
{
bool backtrack=false;
int lastNode=-1;
int current=0;
cuda::kd_tree_builder_detail::SplitInfo split;
while(true) {
if( current==-1 ) break;
split = splits[current];
float diff1;
if( split.split_dim==0 ) diff1=q.x- split.split_val;
else if( split.split_dim==1 ) diff1=q.y- split.split_val;
else if( split.split_dim==2 ) diff1=q.z- split.split_val;
// children are next to each other: leftChild+1 == rightChild
int leftChild= child1[current];
int bestChild=leftChild;
int otherChild=leftChild;
if ((diff1)<0) {
otherChild++;
}
else {
bestChild++;
}
if( !backtrack ) {
/* If this is a leaf node, then do check and return. */
if (leftChild==-1) {
for (int i=split.left; i<split.right; ++i) {
float dist=distance.dist(elements[i],q);
result.insert(i,dist);
}
backtrack=true;
lastNode=current;
current=parent[current];
}
else { // go to closer child node
lastNode=current;
current=bestChild;
}
}
else { // continue moving back up the tree or visit far node?
// minimum possible distance between query point and a point inside the AABB
float mindistsq=0;
float4 aabbMin=aabbLow[otherChild];
float4 aabbMax=aabbHigh[otherChild];
if( q.x < aabbMin.x ) mindistsq+=distance.axisDist(q.x, aabbMin.x);
else if( q.x > aabbMax.x ) mindistsq+=distance.axisDist(q.x, aabbMax.x);
if( q.y < aabbMin.y ) mindistsq+=distance.axisDist(q.y, aabbMin.y);
else if( q.y > aabbMax.y ) mindistsq+=distance.axisDist(q.y, aabbMax.y);
if( q.z < aabbMin.z ) mindistsq+=distance.axisDist(q.z, aabbMin.z);
else if( q.z > aabbMax.z ) mindistsq+=distance.axisDist(q.z, aabbMax.z);
// the far node was NOT the last node (== not visited yet) AND there could be a closer point in it
if(( lastNode==bestChild) && (mindistsq <= result.worstDist() ) ) {
lastNode=current;
current=otherChild;
backtrack=false;
}
else {
lastNode=current;
current=parent[current];
}
}
}
}
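// Added explanatory note (not part of the original source): the loop above is a
// stackless depth-first traversal. Instead of pushing nodes, it keeps only `current`
// and `lastNode`; when it has to move back up it follows parent[], and it only descends
// into the sibling ("far") subtree when it arrived from the near child
// (lastNode == bestChild) and the AABB lower bound mindistsq can still beat
// result.worstDist().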
template< typename GPUResultSet, typename Distance >
__global__
void nearestKernel(const cuda::kd_tree_builder_detail::SplitInfo* splits,
const int* child1,
const int* parent,
const float4* aabbMin,
const float4* aabbMax, const float4* elements, const float* query, int stride, int resultStride, int* resultIndex, float* resultDist, int querysize, GPUResultSet result, Distance dist = Distance())
{
typedef float DistanceType;
typedef float ElementType;
// typedef DistanceType float;
size_t tid = blockDim.x*blockIdx.x + threadIdx.x;
if( tid >= querysize ) return;
float4 q = make_float4(query[tid*stride],query[tid*stride+1],query[tid*stride+2],0);
result.setResultLocation( resultDist, resultIndex, tid, resultStride );
searchNeighbors(splits,child1,parent,aabbMin,aabbMax,elements, q, result, dist);
result.finish();
}
}
//! contains some pointers that use cuda data types and that cannot be easily
//! forward-declared.
//! basically it contains all GPU buffers
template<typename Distance>
struct KDTreeCuda3dIndex<Distance>::GpuHelper
{
thrust::device_vector< cuda::kd_tree_builder_detail::SplitInfo >* gpu_splits_;
thrust::device_vector< int >* gpu_parent_;
thrust::device_vector< int >* gpu_child1_;
thrust::device_vector< float4 >* gpu_aabb_min_;
thrust::device_vector< float4 >* gpu_aabb_max_;
thrust::device_vector<float4>* gpu_points_;
thrust::device_vector<int>* gpu_vind_;
GpuHelper() : gpu_splits_(0), gpu_parent_(0), gpu_child1_(0), gpu_aabb_min_(0), gpu_aabb_max_(0), gpu_points_(0), gpu_vind_(0){
}
~GpuHelper()
{
delete gpu_splits_;
gpu_splits_=0;
delete gpu_parent_;
gpu_parent_=0;
delete gpu_child1_;
gpu_child1_=0;
delete gpu_aabb_max_;
gpu_aabb_max_=0;
delete gpu_aabb_min_;
gpu_aabb_min_=0;
delete gpu_vind_;
gpu_vind_=0;
delete gpu_points_;
gpu_points_=0;
}
};
//! thrust transform functor
//! transforms indices in the internal data set back to the original indices
struct map_indices
{
const int* v_;
map_indices(const int* v) : v_(v) {
}
__host__ __device__
float operator() (const int&i) const
{
if( i>= 0 ) return v_[i];
else return i;
}
};
//! implementation of L2 distance for the CUDA kernels
struct CudaL2
{
static float
__host__ __device__
axisDist( float a, float b )
{
return (a-b)*(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
float4 diff = a-b;
return dot(diff,diff);
}
};
//! implementation of L1 distance for the CUDA kernels
//! NOT TESTED!
struct CudaL1
{
static float
__host__ __device__
axisDist( float a, float b )
{
return fabs(a-b);
}
static float
__host__ __device__
dist( float4 a, float4 b )
{
return fabs(a.x-b.x)+fabs(a.y-b.y)+fabs(a.z-b.z)+fabs(a.w-b.w);
}
};
//! used to adapt CPU and GPU distance types.
//! specializations define the ::type as their corresponding GPU distance type
//! \see GpuDistance< L2<float> >, GpuDistance< L2_Simple<float> >
template< class Distance >
struct GpuDistance
{
};
template<>
struct GpuDistance< L2<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L2_Simple<float> >
{
typedef CudaL2 type;
};
template<>
struct GpuDistance< L1<float> >
{
typedef CudaL1 type;
};
template< typename Distance >
void KDTreeCuda3dIndex<Distance>::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params)
{
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(int(indices.cols) >= knn);
assert( dists.cols == indices.cols && dists.stride==indices.stride );
bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
float epsError = 1+get_param(params,"eps",0.0f);
bool sorted = get_param(params,"sorted",true);
bool use_heap = get_param(params,"use_heap",false);
typename GpuDistance<Distance>::type distance;
// std::cout<<" search: "<<std::endl;
// std::cout<<" rows: "<<indices.rows<<" "<<dists.rows<<" "<<queries.rows<<std::endl;
// std::cout<<" cols: "<<indices.cols<<" "<<dists.cols<<" "<<queries.cols<<std::endl;
// std::cout<<" stride: "<<indices.stride<<" "<<dists.stride<<" "<<queries.stride<<std::endl;
// std::cout<<" knn:"<<knn<<" matrices_on_gpu:"<<matrices_on_gpu<<std::endl;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
thrust::device_vector<float> distsDev(queries.rows* dists.stride);
thrust::device_vector<int> indicesDev(queries.rows* dists.stride);
if( knn==1 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.data );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
}
else {
thrust::device_ptr<float> qd = thrust::device_pointer_cast(queries.data);
thrust::device_ptr<float> dd = thrust::device_pointer_cast(dists.data);
thrust::device_ptr<int> id = thrust::device_pointer_cast(indices.data);
if( knn==1 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::SingleResultSet<float>(epsError),distance);
// KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_nodes_)[0])),
// thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
// thrust::raw_pointer_cast(&queriesDev[0]),
// queries.stride,
// thrust::raw_pointer_cast(&indicesDev[0]),
// thrust::raw_pointer_cast(&distsDev[0]),
// queries.rows, epsError);
//
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, true>(knn,sorted,epsError)
, distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnResultSet<float, false>(knn,sorted,epsError),
distance
);
}
}
thrust::transform(id, id+knn*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
}
}
template< typename Distance>
int KDTreeCuda3dIndex<Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params)
{
// assert(indices.rows >= queries.rows);
// assert(dists.rows >= queries.rows);
int max_neighbors = get_param(params, "max_neighbors", -1);
bool sorted = get_param(params, "sorted", true);
bool use_heap = get_param(params, "use_heap", false);
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
thrust::device_vector<int> countsDev(queries.rows);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&countsDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,max_neighbors),
distance
);
thrust::host_vector<int> counts_host=countsDev;
if( max_neighbors!=0 ) { // we'll need this later, but the exclusive_scan will change the array
for( size_t i=0; i<queries.rows; i++ ) {
int count = counts_host[i];
if( count > 0 ) {
indices[i].resize(count);
dists[i].resize(count);
}
else {
indices[i].clear();
dists[i].clear();
}
}
}
int neighbors_last_elem = countsDev.back();
thrust::exclusive_scan( countsDev.begin(), countsDev.end(), countsDev.begin() );
size_t total_neighbors=neighbors_last_elem+countsDev.back();
if( max_neighbors==0 ) return total_neighbors;
thrust::device_vector<int> indicesDev(total_neighbors,-1);
thrust::device_vector<float> distsDev(total_neighbors,std::numeric_limits<float>::infinity());
if( max_neighbors<0 ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusResultSet<float>(radius,thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
else {
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, true>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
1,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::RadiusKnnResultSet<float, false>(radius,max_neighbors, thrust::raw_pointer_cast(&countsDev[0]),get_param(params, "sorted", true)), distance);
}
}
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::host_vector<int> indices_temp = indicesDev;
thrust::host_vector<float> dists_temp = distsDev;
int buffer_index=0;
for( size_t i=0; i<queries.rows; i++ ) {
for( size_t j=0; j<counts_host[i]; j++ ) {
dists[i][j]=dists_temp[buffer_index];
indices[i][j]=indices_temp[buffer_index];
++buffer_index;
}
}
return buffer_index;
}
//! used in the radius search to count the total number of neighbors
struct isNotMinusOne
{
__host__ __device__
bool operator() ( int i ){
return i!=-1;
}
};
template< typename Distance>
int KDTreeCuda3dIndex< Distance >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params)
{
int max_neighbors = get_param(params, "max_neighbors", -1);
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows || max_neighbors==0 );
assert(indices.stride==dists.stride || max_neighbors==0 );
assert( indices.cols==indices.stride );
assert(dists.rows >= queries.rows || max_neighbors==0 );
bool sorted = get_param(params, "sorted", true);
bool matrices_on_gpu = get_param(params, "matrices_in_gpu_ram", false);
float epsError = 1+get_param(params,"eps",0.0f);
bool use_heap = get_param(params,"use_heap",false);
if( max_neighbors<0 ) max_neighbors=indices.cols;
if( !matrices_on_gpu ) {
thrust::device_vector<float> queriesDev(queries.stride* queries.rows,0);
thrust::copy( queries.data, queries.data+queries.stride*queries.rows, queriesDev.begin() );
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
indices.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
thrust::device_vector<float> distsDev(queries.rows* max_neighbors);
thrust::device_vector<int> indicesDev(queries.rows* max_neighbors);
// bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
thrust::raw_pointer_cast(&queriesDev[0]),
queries.stride,
dists.stride,
thrust::raw_pointer_cast(&indicesDev[0]),
thrust::raw_pointer_cast(&distsDev[0]),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::copy( distsDev.begin(), distsDev.end(), dists.data );
thrust::transform(indicesDev.begin(), indicesDev.end(), indicesDev.begin(), map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::count_if(indicesDev.begin(), indicesDev.end(), isNotMinusOne() );
}
else {
thrust::device_ptr<float> qd=thrust::device_pointer_cast(queries.data);
thrust::device_ptr<float> dd=thrust::device_pointer_cast(dists.data);
thrust::device_ptr<int> id=thrust::device_pointer_cast(indices.data);
typename GpuDistance<Distance>::type distance;
int threadsPerBlock = 128;
int blocksPerGrid=(queries.rows+threadsPerBlock-1)/threadsPerBlock;
if( max_neighbors== 0 ) {
thrust::device_vector<int> indicesDev(queries.rows* indices.stride);
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
indices.stride,
id.get(),
0,
queries.rows, flann::cuda::CountingRadiusResultSet<float>(radius,-1),
distance
);
thrust::copy( indicesDev.begin(), indicesDev.end(), indices.data );
return thrust::reduce(indicesDev.begin(), indicesDev.end() );
}
// bool sorted = get_param(params,"sorted",true);
if( use_heap ) {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, true>(max_neighbors,sorted,epsError, radius), distance);
}
else {
KdTreeCudaPrivate::nearestKernel<<<blocksPerGrid, threadsPerBlock>>> (thrust::raw_pointer_cast(&((*gpu_helper_->gpu_splits_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_child1_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_parent_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_min_)[0])),
thrust::raw_pointer_cast(&((*gpu_helper_->gpu_aabb_max_)[0])),
thrust::raw_pointer_cast( &((*gpu_helper_->gpu_points_)[0]) ),
qd.get(),
queries.stride,
dists.stride,
id.get(),
dd.get(),
queries.rows, flann::cuda::KnnRadiusResultSet<float, false>(max_neighbors,sorted,epsError, radius), distance);
}
thrust::transform(id, id+max_neighbors*queries.rows, id, map_indices(thrust::raw_pointer_cast( &((*gpu_helper_->gpu_vind_))[0]) ));
return thrust::count_if(id, id+max_neighbors*queries.rows, isNotMinusOne() );
}
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::uploadTreeToGpu()
{
// just make sure that no weird alignment stuff is going on...
// shouldn't, but who knows
// (I would make this a (boost) static assertion, but so far flann seems to avoid boost)
// assert( sizeof( KdTreeCudaPrivate::GpuNode)==sizeof( Node ) );
delete gpu_helper_;
gpu_helper_ = new GpuHelper;
gpu_helper_->gpu_points_=new thrust::device_vector<float4>(size_);
thrust::device_vector<float4> tmp(size_);
if( get_param(index_params_,"input_is_gpu_float4",false) ) {
assert( dataset_.cols == 3 && dataset_.stride==4);
thrust::copy( thrust::device_pointer_cast((float4*)dataset_.data),thrust::device_pointer_cast((float4*)(dataset_.data))+size_,tmp.begin());
}
else {
// k is limited to 4 -> use 128bit-alignment regardless of dimensionality
// makes cpu search about 5% slower, but gpu can read a float4 w/ a single instruction
// (vs a float2 and a float load for a float3 value)
// pad data directly to avoid having to copy and re-format the data when
// copying it to the GPU
data_ = flann::Matrix<ElementType>(new ElementType[size_*4], size_, dim_,4);
for (size_t i=0; i<size_; ++i) {
for (size_t j=0; j<dim_; ++j) {
data_[i][j] = dataset_[i][j];
}
for (size_t j=dim_; j<4; ++j) {
data_[i][j] = 0;
}
}
thrust::copy((float4*)data_.data,(float4*)(data_.data)+size_,tmp.begin());
}
CudaKdTreeBuilder builder( tmp, leaf_max_size_ );
builder.buildTree();
gpu_helper_->gpu_splits_ = builder.splits_;
gpu_helper_->gpu_aabb_min_ = builder.aabb_min_;
gpu_helper_->gpu_aabb_max_ = builder.aabb_max_;
gpu_helper_->gpu_child1_ = builder.child1_;
gpu_helper_->gpu_parent_=builder.parent_;
gpu_helper_->gpu_vind_=builder.index_x_;
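// permute the padded points into the order produced by the tree builder; the same
// index vector is kept as gpu_vind_ so search results can be mapped back to the
// caller's original point indices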
thrust::gather( builder.index_x_->begin(), builder.index_x_->end(), tmp.begin(), gpu_helper_->gpu_points_->begin());
// gpu_helper_->gpu_nodes_=new thrust::device_vector<KdTreeCudaPrivate::GpuNode>(node_count_);
// gpu_helper_->gpu_vind_=new thrust::device_vector<int>(size_);
// thrust::copy( (KdTreeCudaPrivate::GpuNode*)&(tree_[0]), ((KdTreeCudaPrivate::GpuNode*)&(tree_[0]))+tree_.size(), gpu_helper_->gpu_nodes_->begin());
// thrust::copy(vind_.begin(),vind_.end(),gpu_helper_->gpu_vind_->begin());
// buildGpuTree();
}
template<typename Distance>
void KDTreeCuda3dIndex<Distance>::clearGpuBuffers()
{
delete gpu_helper_;
gpu_helper_=0;
}
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L2_Simple<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L2_Simple<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L2_Simple<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
// explicit instantiations for distance-independent functions
template
void KDTreeCuda3dIndex<flann::L1<float> >::uploadTreeToGpu();
template
void KDTreeCuda3dIndex<flann::L1<float> >::clearGpuBuffers();
template
struct KDTreeCuda3dIndex<flann::L1<float> >::GpuHelper;
template
void KDTreeCuda3dIndex<flann::L1<float> >::knnSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, Matrix<int>& indices, Matrix<DistanceType>& dists, float radius, const SearchParams& params);
template
int KDTreeCuda3dIndex< flann::L1<float> >::radiusSearchGpu(const Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices,
std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params);
}
|
5787efa4609df8c03cfdf2f65cfef7163454d3fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
// Test IntegerDivider: this tests *all* 32-bit pairs (a, b) where a % b is 0 or
// (b-1), so it takes a few minutes to run.
#include <assert.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "THH/THHIntegerDivider.cuh"
using std::vector;
template<typename Value>
struct TestCase {
Value dividend;
int divisor_idx;
int steps;
TestCase(Value dividend, int divisor_idx, int steps)
: dividend(dividend), divisor_idx(divisor_idx), steps(steps) { }
};
template<typename Value>
__global__ void testIntDivider(const IntDivider<Value> *dividers,
const TestCase<Value> *testCases,
int numCases)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numCases; i += stride) {
const TestCase<Value> &tc = testCases[i];
Value dividend = tc.dividend;
const IntDivider<Value> ÷r = dividers[tc.divisor_idx];
Value divisor = divider.divisor;
for (int j = 0; j < tc.steps; j++) {
if (sizeof(Value) == 4 && dividend > INT32_MAX) return;
DivMod<Value> qr = divider.divmod(dividend);
assert(qr.div == dividend / divisor && qr.mod == dividend % divisor);
dividend += divisor;
}
}
}
enum {
// Number of test cases per each kernel invocation.
NUM_CASES = 1000000,
// Maximum number of steps per each test case.
MAX_STEPS = 10000,
};
// Test the magic division algorithm.
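// For 32-bit operands IntDivider replaces '/' and '%' with a precomputed
// multiply-and-shift sequence; testIntDivider verifies its divmod() against the
// plain operators for every queued (dividend, divisor) pair.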
template<typename Value>
class IntDividerTester {
public:
IntDividerTester() {
hipError_t err;
err = hipMalloc(÷rsBuf_, NUM_CASES * sizeof(IntDivider<Value>));
REQUIRE(err == hipSuccess);
err = hipMalloc(&testCasesBuf_, NUM_CASES * sizeof(TestCase<Value>));
REQUIRE(err == hipSuccess);
}
~IntDividerTester() {
hipError_t err;
err = hipFree(dividersBuf_);
REQUIRE(err == hipSuccess);
err = hipFree(testCasesBuf_);
REQUIRE(err == hipSuccess);
}
void addTestCase(Value dividend, Value divisor, int steps) {
// Append a new IntDivider using 'divisor' if necessary.
if (dividers_.empty() || dividers_.back().divisor != divisor)
dividers_.emplace_back(divisor);
// Append the test case.
testCases_.emplace_back(dividend, dividers_.size() - 1, steps);
// Launch the test kernel if the buffer is full.
if (testCases_.size() == NUM_CASES) flush();
}
void flush() {
hipError_t err;
if (testCases_.empty()) return;
REQUIRE(!dividers_.empty());
REQUIRE(dividers_.size() <= NUM_CASES);
REQUIRE(testCases_.size() <= NUM_CASES);
err = hipMemcpy(dividersBuf_, dividers_.data(),
dividers_.size() * sizeof(IntDivider<Value>),
hipMemcpyHostToDevice);
REQUIRE(err == hipSuccess);
err = hipMemcpy(testCasesBuf_, testCases_.data(),
testCases_.size() * sizeof(TestCase<Value>),
hipMemcpyHostToDevice);
REQUIRE(err == hipSuccess);
int numCases = testCases_.size();
hipLaunchKernelGGL(( testIntDivider<Value>), dim3(512), dim3(512), 0, 0,
dividersBuf_, testCasesBuf_, numCases);
dividers_.clear();
testCases_.clear();
}
private:
vector<IntDivider<Value>> dividers_;
vector<TestCase<Value>> testCases_;
IntDivider<Value> *dividersBuf_;
TestCase<Value> *testCasesBuf_;
};
static void testUint32Divider()
{
fprintf(stderr, "Testing 32-bit integer division ...");
IntDividerTester<uint32_t> tester;
for (uint64_t divisor = 1; divisor <= INT32_MAX; divisor++) {
if (divisor < 1000000 && divisor % 10000 == 0) fprintf(stderr, ".");
if (divisor % 10000000 == 0) fprintf(stderr, "-");
// In order to save time, we only test when the remainder is zero or
// (divisor - 1).
uint64_t dividend = 0;
while (dividend <= INT32_MAX) {
uint64_t steps = (INT32_MAX - dividend) / divisor + 1;
if (steps > MAX_STEPS) steps = MAX_STEPS;
tester.addTestCase(dividend, divisor, steps);
tester.addTestCase(dividend + divisor - 1, divisor, steps);
dividend += divisor * steps;
}
// Check the boundary cases.
tester.addTestCase(1, divisor, 1);
tester.addTestCase(INT32_MAX, divisor, 1);
}
tester.flush();
fprintf(stderr, " Done!\n");
}
// uint64_t divider uses plain division, so we just check a few random cases.
static void testUint64Divider()
{
IntDividerTester<uint64_t> tester;
uint64_t dividend = 0x123456789ULL;
uint64_t divisor = 0x54321ULL;
for (int i = 0; i < 1000; i++) {
if (divisor != 0) {
tester.addTestCase(dividend, divisor, 100);
// Test small divisor.
tester.addTestCase(dividend, divisor % 65536, 100);
// Create pseudorandom numbers.
dividend *= 0x100000001b3ULL;
dividend ^= 0x1234567890abcdefULL;
divisor *= 0x100000001b3ULL;
divisor ^= 0x1234567890abcdefULL;
}
}
tester.flush();
}
TEST_CASE( "CUDA integer divider", "[cuda]" ) {
testUint64Divider();
testUint32Divider();
hipError_t err = hipDeviceSynchronize();
REQUIRE(err == hipSuccess);
}
| 5787efa4609df8c03cfdf2f65cfef7163454d3fa.cu | #define CATCH_CONFIG_MAIN
#include "catch.hpp"
// Test IntegerDivider: this tests *all* 32-bit pairs (a, b) where a % b is 0 or
// (b-1), so it takes a few minutes to run.
#include <assert.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "THC/THCIntegerDivider.cuh"
using std::vector;
template<typename Value>
struct TestCase {
Value dividend;
int divisor_idx;
int steps;
TestCase(Value dividend, int divisor_idx, int steps)
: dividend(dividend), divisor_idx(divisor_idx), steps(steps) { }
};
template<typename Value>
__global__ void testIntDivider(const IntDivider<Value> *dividers,
const TestCase<Value> *testCases,
int numCases)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < numCases; i += stride) {
const TestCase<Value> &tc = testCases[i];
Value dividend = tc.dividend;
const IntDivider<Value> ÷r = dividers[tc.divisor_idx];
Value divisor = divider.divisor;
for (int j = 0; j < tc.steps; j++) {
if (sizeof(Value) == 4 && dividend > INT32_MAX) return;
DivMod<Value> qr = divider.divmod(dividend);
assert(qr.div == dividend / divisor && qr.mod == dividend % divisor);
dividend += divisor;
}
}
}
enum {
// Number of test cases per each kernel invocation.
NUM_CASES = 1000000,
// Maximum number of steps per each test case.
MAX_STEPS = 10000,
};
// Test the magic division algorithm.
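// For 32-bit operands IntDivider replaces '/' and '%' with a precomputed
// multiply-and-shift sequence; testIntDivider verifies its divmod() against the
// plain operators for every queued (dividend, divisor) pair.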
template<typename Value>
class IntDividerTester {
public:
IntDividerTester() {
cudaError_t err;
err = cudaMalloc(÷rsBuf_, NUM_CASES * sizeof(IntDivider<Value>));
REQUIRE(err == cudaSuccess);
err = cudaMalloc(&testCasesBuf_, NUM_CASES * sizeof(TestCase<Value>));
REQUIRE(err == cudaSuccess);
}
~IntDividerTester() {
cudaError_t err;
err = cudaFree(dividersBuf_);
REQUIRE(err == cudaSuccess);
err = cudaFree(testCasesBuf_);
REQUIRE(err == cudaSuccess);
}
void addTestCase(Value dividend, Value divisor, int steps) {
// Append a new IntDivider using 'divisor' if necessary.
if (dividers_.empty() || dividers_.back().divisor != divisor)
dividers_.emplace_back(divisor);
// Append the test case.
testCases_.emplace_back(dividend, dividers_.size() - 1, steps);
// Launch the test kernel if the buffer is full.
if (testCases_.size() == NUM_CASES) flush();
}
void flush() {
cudaError_t err;
if (testCases_.empty()) return;
REQUIRE(!dividers_.empty());
REQUIRE(dividers_.size() <= NUM_CASES);
REQUIRE(testCases_.size() <= NUM_CASES);
err = cudaMemcpy(dividersBuf_, dividers_.data(),
dividers_.size() * sizeof(IntDivider<Value>),
cudaMemcpyHostToDevice);
REQUIRE(err == cudaSuccess);
err = cudaMemcpy(testCasesBuf_, testCases_.data(),
testCases_.size() * sizeof(TestCase<Value>),
cudaMemcpyHostToDevice);
REQUIRE(err == cudaSuccess);
int numCases = testCases_.size();
testIntDivider<Value><<<512, 512>>>(
dividersBuf_, testCasesBuf_, numCases);
dividers_.clear();
testCases_.clear();
}
private:
vector<IntDivider<Value>> dividers_;
vector<TestCase<Value>> testCases_;
IntDivider<Value> *dividersBuf_;
TestCase<Value> *testCasesBuf_;
};
static void testUint32Divider()
{
fprintf(stderr, "Testing 32-bit integer division ...");
IntDividerTester<uint32_t> tester;
for (uint64_t divisor = 1; divisor <= INT32_MAX; divisor++) {
if (divisor < 1000000 && divisor % 10000 == 0) fprintf(stderr, ".");
if (divisor % 10000000 == 0) fprintf(stderr, "-");
// In order to save time, we only test when the remainder is zero or
// (divisor - 1).
uint64_t dividend = 0;
while (dividend <= INT32_MAX) {
uint64_t steps = (INT32_MAX - dividend) / divisor + 1;
if (steps > MAX_STEPS) steps = MAX_STEPS;
tester.addTestCase(dividend, divisor, steps);
tester.addTestCase(dividend + divisor - 1, divisor, steps);
dividend += divisor * steps;
}
// Check the boundary cases.
tester.addTestCase(1, divisor, 1);
tester.addTestCase(INT32_MAX, divisor, 1);
}
tester.flush();
fprintf(stderr, " Done!\n");
}
// uint64_t divider uses plain division, so we just check a few random cases.
static void testUint64Divider()
{
IntDividerTester<uint64_t> tester;
uint64_t dividend = 0x123456789ULL;
uint64_t divisor = 0x54321ULL;
for (int i = 0; i < 1000; i++) {
if (divisor != 0) {
tester.addTestCase(dividend, divisor, 100);
// Test small divisor.
tester.addTestCase(dividend, divisor % 65536, 100);
// Create pseudorandom numbers.
dividend *= 0x100000001b3ULL;
dividend ^= 0x1234567890abcdefULL;
divisor *= 0x100000001b3ULL;
divisor ^= 0x1234567890abcdefULL;
}
}
tester.flush();
}
TEST_CASE( "CUDA integer divider", "[cuda]" ) {
testUint64Divider();
testUint32Divider();
cudaError_t err = cudaDeviceSynchronize();
REQUIRE(err == cudaSuccess);
}
|
805dfbcbece15139bb94b8251f909e4f3b49f8db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_random_uniform_cuda.h"
#include "math/private_random_helpers.h"
#ifdef __cplusplus
extern "C" {
#endif
__global__
void oskar_mem_random_uniform_cudak_f(
const int num_elements, float* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to uniform float. */
float4 r;
r.x = oskar_int_to_range_0_to_1_f(u.i[0]);
r.y = oskar_int_to_range_0_to_1_f(u.i[1]);
r.z = oskar_int_to_range_0_to_1_f(u.i[2]);
r.w = oskar_int_to_range_0_to_1_f(u.i[3]);
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((float4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
__global__
void oskar_mem_random_uniform_cudak_d(
const int num_elements, double* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to uniform double. */
double4 r;
r.x = oskar_int_to_range_0_to_1_d(u.i[0]);
r.y = oskar_int_to_range_0_to_1_d(u.i[1]);
r.z = oskar_int_to_range_0_to_1_d(u.i[2]);
r.w = oskar_int_to_range_0_to_1_d(u.i[3]);
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((double4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
void oskar_mem_random_uniform_cuda_f(int num_elements,
float* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3)
{
int num_blocks, num_threads = 256;
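/* Each thread generates and stores four values, so launch one thread per
 * group of four output elements. */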
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_uniform_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3);
}
void oskar_mem_random_uniform_cuda_d(int num_elements,
double* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_uniform_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3);
}
#ifdef __cplusplus
}
#endif
| 805dfbcbece15139bb94b8251f909e4f3b49f8db.cu | /*
* Copyright (c) 2015, The University of Oxford
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the name of the University of Oxford nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "mem/oskar_mem_random_uniform_cuda.h"
#include "math/private_random_helpers.h"
#ifdef __cplusplus
extern "C" {
#endif
__global__
void oskar_mem_random_uniform_cudak_f(
const int num_elements, float* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to uniform float. */
float4 r;
r.x = oskar_int_to_range_0_to_1_f(u.i[0]);
r.y = oskar_int_to_range_0_to_1_f(u.i[1]);
r.z = oskar_int_to_range_0_to_1_f(u.i[2]);
r.w = oskar_int_to_range_0_to_1_f(u.i[3]);
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((float4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
__global__
void oskar_mem_random_uniform_cudak_d(
const int num_elements, double* data,
const unsigned int seed, const unsigned int counter1,
const unsigned int counter2, const unsigned int counter3)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int i4 = i * 4;
if (i4 >= num_elements) return;
OSKAR_R123_GENERATE_4(seed, i, counter1, counter2, counter3)
/* Convert to uniform double. */
double4 r;
r.x = oskar_int_to_range_0_to_1_d(u.i[0]);
r.y = oskar_int_to_range_0_to_1_d(u.i[1]);
r.z = oskar_int_to_range_0_to_1_d(u.i[2]);
r.w = oskar_int_to_range_0_to_1_d(u.i[3]);
/* Store random numbers. */
if (i4 <= num_elements - 4)
{
((double4*) data)[i] = r;
}
else
{
/* End case only if length not divisible by 4. */
data[i4] = r.x;
if (i4 + 1 < num_elements)
data[i4 + 1] = r.y;
if (i4 + 2 < num_elements)
data[i4 + 2] = r.z;
if (i4 + 3 < num_elements)
data[i4 + 3] = r.w;
}
}
void oskar_mem_random_uniform_cuda_f(int num_elements,
float* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3)
{
int num_blocks, num_threads = 256;
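/* Each thread generates and stores four values, so launch one thread per
 * group of four output elements. */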
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_uniform_cudak_f OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3);
}
void oskar_mem_random_uniform_cuda_d(int num_elements,
double* d_data, unsigned int seed, unsigned int counter1,
unsigned int counter2, unsigned int counter3)
{
int num_blocks, num_threads = 256;
num_blocks = (((num_elements + 3) / 4) + num_threads - 1) / num_threads;
oskar_mem_random_uniform_cudak_d OSKAR_CUDAK_CONF(num_blocks, num_threads)
(num_elements, d_data, seed, counter1, counter2, counter3);
}
#ifdef __cplusplus
}
#endif
|
0b1741259385daefe6c17340f97f66213d04f322.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include "../common/book.h"
#define N 100
////////////////////////////////////////
__global__ void cuda_add(int * a, int * b, int * c){
int tid = blockIdx.x;
if(tid<N){
a[tid] = b[tid] + c[tid];
}
}
////////////////////////////////////////
void fill_vectors(int * a, int * b, int * c){
for(int i=0; i<N; i++){
a[i] = 0;
b[i] = i*i;
c[i] = -i;
}
}
////////////////////////////////////////
int main(void){
int a[N], b[N], c[N];
int * deva, * devb, * devc;
// allocating ON-DEVICE memory using hipMalloc(...)
HANDLE_ERROR( hipMalloc( (void**)&deva, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&devb, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&devc, N * sizeof(int) ) );
// fill vectors with numbers
int * pa = &a[0];
int * pb = &b[0];
int * pc = &c[0];
fill_vectors(pa, pb, pc);
// copy HOST -> DEVICE using hipMemcpy(...)
HANDLE_ERROR(hipMemcpy(devb, b, N * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(devc, c, N * sizeof(int), hipMemcpyHostToDevice));
// perform DEVICE operation
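// launch one block per element: cuda_add uses blockIdx.x as the element index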
hipLaunchKernelGGL(( cuda_add), dim3(N),dim3(1), 0, 0, deva, devb, devc);
// return result by copying from DEVICE -> HOST
HANDLE_ERROR(hipMemcpy(a, deva, N * sizeof(int), hipMemcpyDeviceToHost));
// print final results
for(int i=0; i<N; i++){
printf("[%d] + [%d] = [%d] \n", b[i], c[i], a[i]);
}
hipFree(deva);
hipFree(devb);
hipFree(devc);
return 0;
}
| 0b1741259385daefe6c17340f97f66213d04f322.cu | #include <stdlib.h>
#include "../common/book.h"
#define N 100
////////////////////////////////////////
__global__ void cuda_add(int * a, int * b, int * c){
int tid = blockIdx.x;
if(tid<N){
a[tid] = b[tid] + c[tid];
}
}
////////////////////////////////////////
void fill_vectors(int * a, int * b, int * c){
for(int i=0; i<N; i++){
a[i] = 0;
b[i] = i*i;
c[i] = -i;
}
}
////////////////////////////////////////
int main(void){
int a[N], b[N], c[N];
int * deva, * devb, * devc;
// allocating ON-DEVICE memory using cudaMalloc(...)
HANDLE_ERROR( cudaMalloc( (void**)&deva, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&devb, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&devc, N * sizeof(int) ) );
// fill vectors with numbers
int * pa = &a[0];
int * pb = &b[0];
int * pc = &c[0];
fill_vectors(pa, pb, pc);
// copy HOST -> DEVICE using cudaMemcpy(...)
HANDLE_ERROR(cudaMemcpy(devb, b, N * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(devc, c, N * sizeof(int), cudaMemcpyHostToDevice));
// perform DEVICE operation
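// launch one block per element: cuda_add uses blockIdx.x as the element index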
cuda_add<<<N,1>>>(deva, devb, devc);
// return result by copying from DEVICE -> HOST
HANDLE_ERROR(cudaMemcpy(a, deva, N * sizeof(int), cudaMemcpyDeviceToHost));
// print final results
for(int i=0; i<N; i++){
printf("[%d] + [%d] = [%d] \n", b[i], c[i], a[i]);
}
cudaFree(deva);
cudaFree(devb);
cudaFree(devc);
return 0;
}
|
0897b8d12b8fd7bfb9b36f8166d086edf1663ec1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************************************************
// Created by Edward Connell
// Copyright (c) 2016 Connell Research. All rights reserved.
//
#include "include/CudaKernels.h"
#include "../../../../../../usr/local/cuda/include/cuda_runtime.h"
#include "../../../../../../usr/include/assert.h"
#include "../../../../../../usr/include/math.h"
#include "../../../../../../usr/local/cuda/include/device_launch_parameters.h"
#include "../../../../../../usr/local/cuda/include/cuda.h"
//------------------------------------------------------------------------------
// device kernel
template <typename T>
__global__ void validateRange_kernel1(const cudaShape_t inShape, const T* inData,
const cudaShape_t outShape, T* outData)
{
CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
// TODO: this is probably divergent, think of a better way
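// concurrent threads may all write the flag, but they store the same value, so the race is benign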
if(!isfinite(inData[i * inShape.stride[0]])) outData[0] = 1;
}
}
//------------------------------------------------------------------------------
// Swift importable C functions
// returns
// 0 all values fall within range
// 1 one or more values are out of range
//
hipError_t cudaValidateRange(const cudaShape_t inShape, const void *inData,
const cudaShape_t outShape, void *outData,
hipStream_t stream)
{
CudaKernelPreCheck(stream);
// require flattening for now
assert(inShape.dataType == outShape.dataType);
unsigned numBlocks = CUDA_NUM_BLOCKS(inShape.extent[0]);
unsigned numThreads = CUDA_NUM_THREADS;
hipError_t status;
switch(inShape.dataType) {
case HIP_R_32F:
status = hipMemsetAsync(outData, 0, sizeof(float), stream);
hipLaunchKernelGGL(( validateRange_kernel1<float>) , dim3(numBlocks), dim3(numThreads), 0, stream,
inShape, (float*)inData, outShape, (float*)outData);
break;
case HIP_R_64F:
status = hipMemsetAsync(outData, 0, sizeof(double), stream);
hipLaunchKernelGGL(( validateRange_kernel1<double>) , dim3(numBlocks), dim3(numThreads), 0, stream,
inShape, (double*)inData, outShape, (double*)outData);
break;
default: assert(false);
};
return status != hipSuccess ? status : CudaKernelPostCheck(stream);
}
| 0897b8d12b8fd7bfb9b36f8166d086edf1663ec1.cu | //******************************************************************************
// Created by Edward Connell
// Copyright (c) 2016 Connell Research. All rights reserved.
//
#include "include/CudaKernels.h"
#include "../../../../../../usr/local/cuda/include/cuda_runtime.h"
#include "../../../../../../usr/include/assert.h"
#include "../../../../../../usr/include/math.h"
#include "../../../../../../usr/local/cuda/include/device_launch_parameters.h"
#include "../../../../../../usr/local/cuda/include/cuda.h"
//------------------------------------------------------------------------------
// device kernel
template <typename T>
__global__ void validateRange_kernel1(const cudaShape_t inShape, const T* inData,
const cudaShape_t outShape, T* outData)
{
CUDA_KERNEL_LOOP(i, inShape.extent[0]) {
// TODO: this is probably divergent, think of a better way
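// concurrent threads may all write the flag, but they store the same value, so the race is benign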
if(!isfinite(inData[i * inShape.stride[0]])) outData[0] = 1;
}
}
//------------------------------------------------------------------------------
// Swift importable C functions
// returns
// 0 all values fall within range
// 1 one or more values are out of range
//
cudaError_t cudaValidateRange(const cudaShape_t inShape, const void *inData,
const cudaShape_t outShape, void *outData,
cudaStream_t stream)
{
CudaKernelPreCheck(stream);
// require flattening for now
assert(inShape.dataType == outShape.dataType);
unsigned numBlocks = CUDA_NUM_BLOCKS(inShape.extent[0]);
unsigned numThreads = CUDA_NUM_THREADS;
cudaError_t status;
switch(inShape.dataType) {
case CUDA_R_32F:
status = cudaMemsetAsync(outData, 0, sizeof(float), stream);
validateRange_kernel1<float> <<<numBlocks, numThreads, 0, stream>>>
(inShape, (float*)inData, outShape, (float*)outData);
break;
case CUDA_R_64F:
status = cudaMemsetAsync(outData, 0, sizeof(double), stream);
validateRange_kernel1<double> <<<numBlocks, numThreads, 0, stream>>>
(inShape, (double*)inData, outShape, (double*)outData);
break;
default: assert(false);
};
return status != cudaSuccess ? status : CudaKernelPostCheck(stream);
}
|
5f8d5fee34c83e24cb3bf566169d020746f41bc4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/matrix/kernelparams.h>
#include <gtest/gtest.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <common/host_buffer.hpp>
#include <cuda_utils.cuh>
#include <iostream>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
class GramMatrixTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(hipStreamCreate(&stream));
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
allocator = std::make_shared<raft::mr::device::default_allocator>();
host_allocator = std::make_shared<raft::mr::host::default_allocator>();
raft::allocate(x_dev, n1 * n_cols);
raft::update_device(x_dev, x_host, n1 * n_cols, stream);
raft::allocate(gram_dev, n1 * n1);
}
void TearDown() override {
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(x_dev));
CUDA_CHECK(hipFree(gram_dev));
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
}
void naiveRBFKernel(float *x1_dev, int n1, int n_cols, float *x2_dev, int n2,
float gamma) {
host_buffer<float> x1_host(host_allocator, stream, n1 * n_cols);
raft::update_host(x1_host.data(), x1_dev, n1 * n_cols, stream);
host_buffer<float> x2_host(host_allocator, stream, n2 * n_cols);
raft::update_host(x2_host.data(), x2_dev, n2 * n_cols, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int i = 0; i < n1; i++) {
for (int j = 0; j < n2; j++) {
float d = 0;
for (int k = 0; k < n_cols; k++) {
float diff = x1_host[i + k * n1] - x2_host[j + k * n2];
d += diff * diff;
}
gram_host_expected[i + j * n2] = exp(-gamma * d);
}
}
}
hipStream_t stream;
hipblasHandle_t cublas_handle;
std::shared_ptr<deviceAllocator> allocator;
std::shared_ptr<hostAllocator> host_allocator;
int n1 = 4;
int n_cols = 2;
int n2 = 4;
float *x_dev;
float *gram_dev;
float x_host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
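// Gram matrix of x_host under the plain dot product (stored column-major);
// the Poly and Tanh tests transform these reference values in place.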
float gram_host_expected[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
};
TEST_F(GramMatrixTest, Base) {
GramMatrixBase<float> kernel(cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, Poly) {
float offset = 2.4;
float gain = 0.5;
// naive kernel
for (int z = 0; z < n1 * n1; z++) {
float val = gain * gram_host_expected[z] + offset;
gram_host_expected[z] = val * val;
}
PolynomialKernel<float, int> kernel(2, gain, offset, cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, Tanh) {
float offset = 2.4;
float gain = 0.5;
// naive kernel
for (int z = 0; z < n1 * n1; z++) {
gram_host_expected[z] = tanh(gain * gram_host_expected[z] + offset);
}
TanhKernel<float> kernel(gain, offset, cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, RBF) {
float gamma = 0.5;
naiveRBFKernel(x_dev, n1, n_cols, x_dev, n1, gamma);
RBFKernel<float> kernel(gamma);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(3e-6f)));
}
TEST_F(GramMatrixTest, RBF_Rectangular) {
float gamma = 0.7;
RBFKernel<float> kernel(gamma);
// Instead of a 5x5 Gram matrix, we want to calculate a 5x3 matrix here.
// The inputs to the distance function are the vector sets x1 and x2.
//
// x1 = [ [1, 6],
// [2, 7],
// [3, 8],
// [4, 9],
// [5, 10] ];
// The vectors are stored in column major format, so actually
float x1[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int n1 = 5;
// x2 = [ [1, 6],
// [2, 7],
// [3, 8] ];
// In column major format:
float x2[] = {1, 2, 3, 6, 7, 8};
int n2 = 3;
//
// The output is a 5x3 matrix. Here is the distance matrix (without exp)
// K(x1,x2) = [ [ 0, 2, 8],
// [ 2, 0, 2],
// [ 8, 2, 0],
// [18, 8, 2],
// [32, 18, 8] ];
//
// It is also stored in column major format, therefore:
float K[] = {0, 2, 8, 18, 32, 2, 0, 2, 8, 18, 8, 2, 0, 2, 8};
// The RBF kernel calculates exp for the distance matrix
for (int i = 0; i < n1 * n2; i++) {
K[i] = exp(-gamma * K[i]);
}
device_buffer<float> x1_dev(allocator, stream, n1 * n_cols);
raft::update_device(x1_dev.data(), x1, n1 * n_cols, stream);
device_buffer<float> x2_dev(allocator, stream, n2 * n_cols);
raft::update_device(x2_dev.data(), x2, n2 * n_cols, stream);
kernel(x1_dev.data(), n1, n_cols, x2_dev.data(), n2, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(K, gram_dev, n1 * n2,
raft::CompareApprox<float>(1e-6f)));
}
}; // end namespace Matrix
}; // end namespace MLCommon
| 5f8d5fee34c83e24cb3bf566169d020746f41bc4.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <cuml/matrix/kernelparams.h>
#include <gtest/gtest.h>
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <common/host_buffer.hpp>
#include <cuda_utils.cuh>
#include <iostream>
#include <matrix/grammatrix.cuh>
#include <matrix/kernelfactory.cuh>
#include <memory>
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
class GramMatrixTest : public ::testing::Test {
protected:
void SetUp() override {
CUDA_CHECK(cudaStreamCreate(&stream));
CUBLAS_CHECK(cublasCreate(&cublas_handle));
allocator = std::make_shared<raft::mr::device::default_allocator>();
host_allocator = std::make_shared<raft::mr::host::default_allocator>();
raft::allocate(x_dev, n1 * n_cols);
raft::update_device(x_dev, x_host, n1 * n_cols, stream);
raft::allocate(gram_dev, n1 * n1);
}
void TearDown() override {
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(x_dev));
CUDA_CHECK(cudaFree(gram_dev));
CUBLAS_CHECK(cublasDestroy(cublas_handle));
}
void naiveRBFKernel(float *x1_dev, int n1, int n_cols, float *x2_dev, int n2,
float gamma) {
host_buffer<float> x1_host(host_allocator, stream, n1 * n_cols);
raft::update_host(x1_host.data(), x1_dev, n1 * n_cols, stream);
host_buffer<float> x2_host(host_allocator, stream, n2 * n_cols);
raft::update_host(x2_host.data(), x2_dev, n2 * n_cols, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int i = 0; i < n1; i++) {
for (int j = 0; j < n2; j++) {
float d = 0;
for (int k = 0; k < n_cols; k++) {
float diff = x1_host[i + k * n1] - x2_host[j + k * n2];
d += diff * diff;
}
gram_host_expected[i + j * n2] = exp(-gamma * d);
}
}
}
cudaStream_t stream;
cublasHandle_t cublas_handle;
std::shared_ptr<deviceAllocator> allocator;
std::shared_ptr<hostAllocator> host_allocator;
int n1 = 4;
int n_cols = 2;
int n2 = 4;
float *x_dev;
float *gram_dev;
float x_host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
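// Gram matrix of x_host under the plain dot product (stored column-major);
// the Poly and Tanh tests transform these reference values in place.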
float gram_host_expected[16] = {26, 32, 38, 44, 32, 40, 48, 56,
38, 48, 58, 68, 44, 56, 68, 80};
};
TEST_F(GramMatrixTest, Base) {
GramMatrixBase<float> kernel(cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, Poly) {
float offset = 2.4;
float gain = 0.5;
// naive kernel
for (int z = 0; z < n1 * n1; z++) {
float val = gain * gram_host_expected[z] + offset;
gram_host_expected[z] = val * val;
}
PolynomialKernel<float, int> kernel(2, gain, offset, cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, Tanh) {
float offset = 2.4;
float gain = 0.5;
// naive kernel
for (int z = 0; z < n1 * n1; z++) {
gram_host_expected[z] = tanh(gain * gram_host_expected[z] + offset);
}
TanhKernel<float> kernel(gain, offset, cublas_handle);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(1e-6f)));
}
TEST_F(GramMatrixTest, RBF) {
float gamma = 0.5;
naiveRBFKernel(x_dev, n1, n_cols, x_dev, n1, gamma);
RBFKernel<float> kernel(gamma);
kernel(x_dev, n1, n_cols, x_dev, n1, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(gram_host_expected, gram_dev, n1 * n1,
raft::CompareApprox<float>(3e-6f)));
}
TEST_F(GramMatrixTest, RBF_Rectangular) {
float gamma = 0.7;
RBFKernel<float> kernel(gamma);
// Instead of a 5x5 Gram matrix, we want to calculate a 5x3 matrix here.
// The inputs to the distance function are the vector sets x1 and x2.
//
// x1 = [ [1, 6],
// [2, 7],
// [3, 8],
// [4, 9],
// [5, 10] ];
// The vectors are stored in column major format, so actually
float x1[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int n1 = 5;
// x2 = [ [1, 6],
// [2, 7],
// [3, 8] ];
// In column major format:
float x2[] = {1, 2, 3, 6, 7, 8};
int n2 = 3;
//
// The output is a 5x3 matrix. Here is the distance matrix (without exp)
// K(x1,x2) = [ [ 0, 2, 8],
// [ 2, 0, 2],
// [ 8, 2, 0],
// [18, 8, 2],
// [32, 18, 8] ];
//
// It is also stored in column major format, therefore:
float K[] = {0, 2, 8, 18, 32, 2, 0, 2, 8, 18, 8, 2, 0, 2, 8};
// The RBF kernel calculates exp for the distance matrix
for (int i = 0; i < n1 * n2; i++) {
K[i] = exp(-gamma * K[i]);
}
device_buffer<float> x1_dev(allocator, stream, n1 * n_cols);
raft::update_device(x1_dev.data(), x1, n1 * n_cols, stream);
device_buffer<float> x2_dev(allocator, stream, n2 * n_cols);
raft::update_device(x2_dev.data(), x2, n2 * n_cols, stream);
kernel(x1_dev.data(), n1, n_cols, x2_dev.data(), n2, gram_dev, stream);
ASSERT_TRUE(raft::devArrMatchHost(K, gram_dev, n1 * n2,
raft::CompareApprox<float>(1e-6f)));
}
}; // end namespace Matrix
}; // end namespace MLCommon
|
e78929e76032881573e8a570b873589bf086ddea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask, float *top_grad, const int mask_index){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
if (((int) mask[index]) == mask_index)
top_grad[index] = gradOutput[index];
} | e78929e76032881573e8a570b873589bf086ddea.cu | #include "includes.h"
__global__ void get_temp_grad (const int n, const float *gradOutput, const float *mask, float *top_grad, const int mask_index){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
if (((int) mask[index]) == mask_index)
top_grad[index] = gradOutput[index];
} |
67b0c1eb994e2759627d4694f3149506bef4b8e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "square.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *array = NULL;
hipMalloc(&array, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, array,n);
hipDeviceSynchronize();
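// 10 untimed warm-up launches, then 1000 timed launches for this block/matrix configuration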
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, array,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, array,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 67b0c1eb994e2759627d4694f3149506bef4b8e2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "square.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *array = NULL;
cudaMalloc(&array, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
square<<<gridBlock,threadBlock>>>(array,n);
cudaDeviceSynchronize();
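// 10 untimed warm-up launches, then 1000 timed launches for this block/matrix configuration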
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
square<<<gridBlock,threadBlock>>>(array,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
square<<<gridBlock,threadBlock>>>(array,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
024c3cffe571e9c3ff0677b68debaac30816c59a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
// 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
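// each output index i maps to source position in = i * scale: bottomIndex/topIndex are the two
// neighbouring source indices and interpolarValue is the fractional blend weight; when 'channels'
// is non-zero both indices are pre-multiplied by it so they can be used directly as buffer offsets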
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
if (blockIdx.x < batchSize) {
auto pX = input + blockIdx.x * inBatchNumValues;
//auto pZ = output_y_ptr;
auto channelStart = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + y * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
for (int c = channelStart; c < channels; c += step) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = top + (bottom - top) * yVal;
}
}
}
}
}
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
hipLaunchKernelGGL(( resizeImageKernel<T>), dim3(batchSize), dim3(outHeight), 256, *stream, input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
}
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
hipError_t err = hipMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangles", err);
}
err = hipMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangles", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = hipFree(xs_);
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangles", err);
}
err = hipFree(ys_);
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangles", err);
}
return Status::OK();
}
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posX, 4);
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), posZ, 4);
output[zIndex] = input[xIndex];
}
}
}
}
}
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
hipLaunchKernelGGL(( resizeNeighborKernel<T>), dim3(batchSize), dim3(outHeight * outWidth), 512, *stream, imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return ND4J_STATUS_OK;
return Status::OK();
}
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// --------------------------------------------------------------------------------------------------------------- //
///////
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, Z* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), y1Pos, 2)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), x1Pos, 2)];
Z y2 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), y2Pos, 2)];
Z x2 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), x2Pos, 2)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
// PRAGMA_OMP_PARALLEL_FOR_SIMD
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), topLeftPos, 4)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), topRightPos, 4)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), bottomLeftPos, 4)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), bottomRightPos, 4)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
// crops->p(b, y, x, d, top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
auto xIndex = shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), xPos, 4);
output[zIndex] = images[xIndex];
// crops->p(b, y, x, d, images->e<T>(bIn, closestYIndex, closestXIndex, d));
}
}
}
}
}
}
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
Z* outBuf = reinterpret_cast<Z*>(crops->specialBuffer());
hipLaunchKernelGGL(( cropAndResizeKernel<T,Z,I>), dim3(batchSize), dim3(math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth)), 512, *stream, imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
}
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} | 024c3cffe571e9c3ff0677b68debaac30816c59a.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
    // 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
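    // Added note (not part of the original source): the three cached fields above are consumed as a
    // plain 1-D linear interpolation along each axis, value = bottom + (top - bottom) * interpolarValue,
    // which is the arithmetic resizeImageKernel below applies first along x and then along y.
    // A minimal self-contained sketch of that combination (the helper name is ours, illustrative only):
    static inline double interpolate1DSketch(double bottomValue, double topValue, double interpolarValue) {
        // interpolarValue is the fractional position between the two cached source samples, in [0, 1)
        return bottomValue + (topValue - bottomValue) * interpolarValue;
    }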
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
if (blockIdx.x < batchSize) {
auto pX = input + blockIdx.x * inBatchNumValues;
//auto pZ = output_y_ptr;
auto channelStart = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + y * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
for (int c = channelStart; c < channels; c += step) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = top + (bottom - top) * yVal;
}
}
}
}
}
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
resizeImageKernel<T><<<batchSize, outHeight, 256, *stream>>>(input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
}
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangles", err);
}
err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangles", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = cudaFree(xs_);
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangles", err);
}
err = cudaFree(ys_);
if (err != 0) {
                throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontal parts rectangles", err);
}
return Status::OK();
}
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(0, shape::shapeOf(inputShape), shape::stride(inputShape), posX, 4);
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), posZ, 4);
output[zIndex] = input[xIndex];
}
}
}
}
}
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
resizeNeighborKernel<T><<<batchSize, outHeight * outWidth, 512, *stream>>>(imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return ND4J_STATUS_OK;
return Status::OK();
}
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// --------------------------------------------------------------------------------------------------------------- //
///////
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, Z* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), y1Pos, 2)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), x1Pos, 2)];
Z y2 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), y2Pos, 2)];
Z x2 = boxes[shape::getOffset(0, shape::shapeOf(boxesShape), shape::stride(boxesShape), x2Pos, 2)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
// PRAGMA_OMP_PARALLEL_FOR_SIMD
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), topLeftPos, 4)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), topRightPos, 4)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), bottomLeftPos, 4)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), bottomRightPos, 4)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
// crops->p(b, y, x, d, top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(0, shape::shapeOf(outputShape), shape::stride(outputShape), zPos, 4);
auto xIndex = shape::getOffset(0, shape::shapeOf(imagesShape), shape::stride(imagesShape), xPos, 4);
output[zIndex] = images[xIndex];
// crops->p(b, y, x, d, images->e<T>(bIn, closestYIndex, closestXIndex, d));
}
}
}
}
}
}
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
Z* outBuf = reinterpret_cast<Z*>(crops->specialBuffer());
cropAndResizeKernel<T,Z,I><<<batchSize, math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth), 512, *stream>>>(imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
}
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} |
9e4ac2cd95548a5bdc65b4df6125b855cab7cfa2.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************
* ECE408 Parallel Programming - Final Project *
* *
* Topic: Terrain Generation *
* Members: Lai,Haoming; Ma,Yunhan; Wang,Bangqi *
* *
************************************************************/
/*
* Terrain Generation:
 * Algorithm: Diamond Square Algorithm.
* Version:
* 0. Serial version: 1 * square loop + 4 * diamond loop;
 * 1. Parallel version: 1 * square kernel + 4 * diamond kernel;
 * 2. Less Kernel Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
 * 3. Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* 4. One Kernel Version: 1 * square_diamond kernel combined; (based on version 2)
* 5. Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device;
 * 6. Less Threads Version: 1 * kernel + 1 * square device + 1 * diamond device (only activate the threads we need);
 * 7. Less Block Version: 1 * kernel + 1 * square device + 1 * diamond device (only launch as many blocks as we need);
 * 8. Shared Memory Version: 1 * kernel + 1 * square device + 1 * diamond device (use shared memory);
* 9. 2D Simple Kernel Version: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex)(based on version 3);
* 10. 2D Smarter Kernel Version: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex)(based on version 6);
* 11. 2D Smarter, Less Threads, less block Version: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex)(based on version 10);
*
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
/* Set the parameter */
/* Choose the version to use */
//#define VERSION 0
/* Set the length of each edge; please use a power of 2 */
#define SIZE 4096
/* Set the number of array elements */
#define N (SIZE+1)*(SIZE+1)
/* Set the roughness for terrain */
#define ROUGHNESS 4
/* Set the height for each corner */
#define CORNER 0
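/* Added illustrative sketch (not part of the original program): the square-step update at the heart of
   the diamond-square algorithm. In the serial version_0 the centre of each square becomes the average of
   its four corners plus a random offset of roughly [-2, 2] (computed as ((rand() % 200) - 100) / 50) and
   scaled by stride / 128; the GPU kernels below use the same average but draw their offset from
   [-ROUGHNESS/2, ROUGHNESS/2] without the stride scaling. The helper name below is ours. */
static inline float squareStepSketch(float c00, float c10, float c01, float c11,
                                     int stride, float randomOffset) {
    float average = (c00 + c10 + c01 + c11) / 4.0f;           /* average of the four corner heights */
    return average + ((float)stride / 128.0f) * randomOffset; /* jitter shrinks as the stride shrinks */
}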
/* main function for different version */
int version_0(void);
int version_1(int block_size);
int version_2(int block_size);
int version_3(bool print, int block_size);
int version_4(bool print, int block_size);
int version_5(bool print, int block_size);
int version_6(bool print, int block_size);
int version_7(bool print, int block_size);
int version_8(void);
int version_9(bool print, int block_size);
int version_10(bool print, int block_size);
int version_11(bool print, int block_size);
int version_12(bool print, int block_size);
int version_100(bool print);
/* main function */
int main(void){
int VERSION;
int p;
int block_size;
int cont =1;
while (cont == 1){
bool print = false;
printf("what version do you want: ");
scanf("%d", &VERSION);
printf("print? (0/1): ");
scanf("%d", &p);
printf("please define block_size(max = 32): ");
scanf("%d", &block_size);
if (p)
print = true;
switch (VERSION){
/* test version 0 */
case 0:
version_0();
break;
case 1:
/* test version 1 */
version_1(block_size);
break;
case 2:
/* test version 2 */
version_2(block_size);
break;
case 3:
/* test version 3 */
version_3(print, block_size);
break;
case 4:
/* test version 4 */
version_4(print, block_size);
break;
case 5:
/* test version 5 */
version_5(print, block_size);
break;
case 6:
/* test version 6 */
version_6(print, block_size);
break;
case 7:
/* test version 7 */
version_7(print, block_size);
break;
case 8:
/* test version 8 */
version_8();
break;
case 9:
/* test version 9 */
version_9(print, block_size);
break;
case 10:
/* test version 10 */
version_10(print, block_size);
break;
case 11:
/* test version 10 */
version_11(print, block_size);
break;
case 12:
/* test version 10 */
version_12(print, block_size);
break;
case 100:
/* test version 10 */
version_100(print);
break;
default:
/* test version 0 */
version_0();
return 0;
}
printf("done. Continue(1=continue, other#=exit)?");
scanf("%d", &cont);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 0.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 0.0:
 * 0.0 Serial version: 1 * square loop + 4 * diamond loop;
*
*/
/* host code for serial version */
int version_0(void) {
clock_t start, end;
double runTime;
float **heightMap = new float*[SIZE + 1];
for (int i = 0; i<SIZE + 1; i++)
heightMap[i] = new float[SIZE + 1];
for (int i = 0; i<SIZE + 1; i++){
for (int j = 0; j<SIZE + 1; j++){
heightMap[i][j] = 0.0;
}
}
	// initialize the four corner points
heightMap[0][0] = 0;
heightMap[SIZE][0] = 0;
heightMap[0][SIZE] = 0;
heightMap[SIZE][SIZE] = 0;
srand(time(NULL));
start = clock();
int stride = SIZE;
while (stride >= 2){
for (int i = 0; i<(SIZE / stride); i++){
for (int j = 0; j<(SIZE / stride); j++){
int leftbottom_x = i* stride;
int leftbottom_y = j* stride;
float average = heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y + stride] + heightMap[leftbottom_x + stride][leftbottom_y + stride];
average = average / 4;
float guess = ((float)((rand() % 200) - 100) / 50);
// printf("%1.5f\n", guess);
heightMap[leftbottom_x + stride / 2][leftbottom_y + stride / 2] = average + ((float)stride/128)*guess;
heightMap[leftbottom_x + stride / 2][leftbottom_y] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x][leftbottom_y + stride / 2] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x + stride][leftbottom_y + stride / 2] = (average + heightMap[leftbottom_x + stride][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x + stride / 2][leftbottom_y + stride] = (average + heightMap[leftbottom_x][leftbottom_y + stride] + heightMap[leftbottom_x + stride][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
}
}
printf("%d \n", stride);
stride = stride / 2;
}
end = clock();
runTime = (double)(end - start) / CLOCKS_PER_SEC;
// for (int i=0; i<=SIZE; i++){
// for(int j=0; j<=SIZE; j++){
// printf("%d: x = %d, y = %d; hm = %f\n", i*j, i, j, heightMap[i][j]);
// }
// }
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i][j]);
}
}
fclose(fp_out);
printf("Run time for Version_0: %f\n", runTime);
printf("Version 0\n");
for (int i = SIZE; i >= 0; i--)
delete[] heightMap[i];
delete[] heightMap;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 1.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 1.0:
* 1.0 Parallel version: 1 * square kernel + 4 * diamond kernel;
 * This parallel version parallelizes the serial code directly: it turns the single square loop into
 * one square kernel and the four diamond loops into four separate diamond kernels.
*/
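/* Added note (not part of the original program): version_1 launches every kernel with
   ceil(N / (block_size * block_size)) blocks of block_size * block_size threads, yet at a given stride
   `rect` only squareInRow * squareInRow distinct squares exist (squareInRow = SIZE / rect), so the extra
   threads recompute vertices already covered by others; presumably this is what the "Less Threads" /
   "Less Block" variants listed above address. A host-side sketch of the useful-work count
   (the helper name is ours, illustrative only): */
static inline int activeSquaresSketch(int size, int rect) {
    int squareInRow = size / rect;      /* squares per row at this stride */
    return squareInRow * squareInRow;   /* number of distinct squares, i.e. threads doing unique work */
}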
/* set up the random number generator states */
__global__ void setseed(hiprandState_t * state, unsigned long seed)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, id, 0, &state[id]);
}
__global__ void generate(float* random, hiprandState_t* globalState, int n)
{
// generate random numbers
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<n)
{
hiprandState_t localState = globalState[idx];
float RANDOM = hiprand_uniform(&localState);
globalState[idx] = localState;
random[idx] = RANDOM;
}
}
/* square kernel to calculate the middle point */
__global__ void Square_1(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
}
}
/* diamond kernel 1_1 to calculate middle bottom point */
__global__ void Diamond_1_1(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, mi, j;
int pmi_b, pmj_b;
float hm_b;
int num_b;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
mi = i + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand1 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_2 to calculate left point */
__global__ void Diamond_1_2(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, mj;
int pmi_l, pmj_l;
float hm_l;
int num_l;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
mj = j + half;
/* find 4 diamond vertex */
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* set the value */
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand2 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_3 to calculate right point */
__global__ void Diamond_1_3(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, mj;
int pmi_r, pmj_r;
float hm_r;
int num_r;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
mj = j + half;
/* find 4 diamond vertex */
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* set the value */
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
		/* compute the height for this vertex */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand3 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_4 to calculate middle top point */
__global__ void Diamond_1_4(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, mi, nj;
int pmi_t, pmj_t;
float hm_t;
int num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
nj = j + rect;
mi = i + half;
/* find 4 diamond vertex */
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
		/* compute the height for this vertex */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand4 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* host code for version 1.0 */
int version_1(int block_size) {
printf("Version 1: square kernel + 4 diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
	/* initialize the height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_1_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_1_2 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_1_3 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_1_4 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_1: %f\n", runTime);
/* FILE *fp_out;
fp_out = fopen("vertex.txt", "w");
for (int i = 0; i<N; i++)
fprintf(fp_out, "%d %d %f\n", i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
fclose(fp_out);
*/
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 2.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 2.0:
 * 2.0 Less Kernel Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
 * This version combines the four diamond kernels into a single kernel. However, each thread in the
 * diamond kernel needs to calculate four vertices.
*/
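/* Added note (not part of the original program): every diamond vertex is averaged over its (up to) four
   axis-aligned neighbours at distance `half`; vertices on the edge of the map only have three, which is
   why the kernels carry a num_* counter next to the running sum. A minimal boundary-aware sketch of that
   average (the helper name is ours; the random roughness term is added by the caller): */
static inline float diamondAverageSketch(const float* hm, int x, int y, int half) {
    float sum = 0.0f;
    int num = 0;
    if (x - half >= 0)    { sum += hm[(x - half) + y * (SIZE + 1)]; num++; } /* left neighbour  */
    if (x + half <= SIZE) { sum += hm[(x + half) + y * (SIZE + 1)]; num++; } /* right neighbour */
    if (y - half >= 0)    { sum += hm[x + (y - half) * (SIZE + 1)]; num++; } /* lower neighbour */
    if (y + half <= SIZE) { sum += hm[x + (y + half) * (SIZE + 1)]; num++; } /* upper neighbour */
    return sum / num;
}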
/* combined diamond kernel: each thread calculates the four diamond points of its square */
__global__ void Diamond_2(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
		/* compute the height for this vertex */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand1 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 2: 1 square kernel + 1 stupid diamond kernel */
int version_2(int block_size) {
printf("Version 2: square kernel + stupid diamond kernel\n");
/* initialize variables */
float *heightMap= new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
	/* initialize the height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_2 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_2: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 3.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 3.0:
* 3.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version restructures the diamond kernel so that different threads handle different vertices;
* each thread in the diamond kernel computes exactly one vertex.
*/
/* smart diamond kernel: each thread computes exactly one diamond vertex */
__global__ void Diamond_3(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
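/* (i, j) is the base corner of the square this thread belongs to at the current level
   (the level has squareInRow x squareInRow squares) */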
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 3: 1 square kernel + 1 smart diamond kernel */
int version_3(bool print, int block_size) {
printf("Version 3: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
int size = block_size * block_size;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_3 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_3: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 4.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 4.0:
* 4.0 Less Kernel Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
* This version combines the four diamond updates into a single kernel, so each thread in the
* diamond step has to compute four vertices.
*/
/* combined square + diamond kernel: each thread computes one square center and its four diamond points */
__global__ void Square_Diamond_4(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* averaged heights computed above; now add random offsets and write the four diamond vertices back */
/* set random generator */
float rand1 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 4: 1 combined square + diamond kernel (1 thread => 4 vertices) */
int version_4(bool print, int block_size) {
printf("Version 4: Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex)\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_Diamond_4 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_4: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 5.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 5.0:
* Version 5.0: Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device;
* This version defines a square device function and a diamond device function, and launches one
* kernel that loops over these two steps internally.
*/
/* square device function: each thread computes the center point of one square */
__device__ void Square_5(hiprandState_t* rng, float* hm, int rect)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
}
}
__device__ void Diamond_5(hiprandState_t* rng, float* hm, int rect)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
__global__ void Square_Diamond_5(hiprandState_t* rng, float* hm)
{
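/* all refinement levels run inside one kernel launch; note that __syncthreads() only
   synchronizes threads within a block, so this design has no grid-wide barrier between
   the square and diamond steps of a level */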
for (int i = SIZE; i > 1; i = i / 2)
{
__syncthreads();
Square_5(rng, hm, i);
Diamond_5(rng, hm, i);
}
}
int version_5(bool print, int block_size) {
printf("Version 5.0: Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device; \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
Square_Diamond_5 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap);
hipDeviceSynchronize();
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_5: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 6.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 6.0:
* 6. Less Threads Version: 1 * square kernel + 1 * smart diamond kernel, where only the threads
* needed at the current level do any work; each diamond thread still computes exactly one vertex.
*/
/* square kernel to calculate the middle point */
__global__ void Square_6(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set check value */
// check1[idx] = mi;
// check2[idx] = mj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* set height map */
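/* the random displacement is scaled by rect/SIZE so the perturbation shrinks at finer levels */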
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* smart diamond kernel: each thread computes exactly one diamond vertex */
__global__ void Diamond_6(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set check value */
// check1[idx] = pmi;
// check2[idx] = pmj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 6: 1 square kernel + 1 smart diamond kernel (only the needed threads do work) */
int version_6(bool print, int block_size) {
printf("Version 6: square kernel + smart diamond kernel (only active needed threads) \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
// float *dev_check1;
// float *dev_check2;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(dev_check1, check1, N * sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(dev_check2, check2, N * sizeof(float), hipMemcpyHostToDevice);
/*
setseed << < ceil((float)N / 256), 256 >> > (rng, unsigned(time(NULL)));
float* rand;
hipMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / 256), 256 >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_6 << <ceil((float)N / (block_size*block_size)), block_size*block_size >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_6 << <ceil((float)N / (block_size*block_size)), block_size*block_size >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(check1, dev_check1, N * sizeof(float), hipMemcpyDeviceToHost);
// hipMemcpy(check2, dev_check2, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
// hipFree(dev_check1);
// hipFree(dev_check2);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_6: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 7.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 7.0:
* 7. Less Blocks Version: 1 * square kernel + 1 * smart diamond kernel, launched with only as many
* blocks and threads as the current level needs; each diamond thread computes exactly one vertex.
*/
/* square kernel to calculate the middle point */
__global__ void Square_7(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* smart diamond kernel: each thread computes exactly one diamond vertex */
__global__ void Diamond_7(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 7: 1 square kernel + 1 smart diamond kernel (launch only the blocks needed per level) */
int version_7(bool print, int block_size) {
printf("Version 7: square kernel + smart diamond kernel (only active needed threads&kernel) \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / 256), 256 >> > (rng, unsigned(time(NULL)));
float* rand;
hipMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / 256), 256 >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
int squareInRow = SIZE / i;
int size_need = squareInRow * squareInRow;
int size_need2 = 4 * size_need;
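/* launch only as many threads as there are squares (size_need) and diamond vertices (size_need2) at this level */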
Square_7 << <ceil((float)size_need / 32), 32 >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_7 << <ceil((float)size_need2 / 32), 32 >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_7: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
int version_8(void) {
printf("8\n");
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 9.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 9.0:
* 9.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version restructures the diamond kernel so that different threads handle different vertices;
* each diamond thread computes exactly one vertex. (A simple 2D revision of version 3.)
*/
__global__ void Square_9(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_temp < SIZE + 1 && idy < SIZE + 1){
int idx = idy*(SIZE + 1) + idx_temp;
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
__global__ void Diamond_9(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_temp < SIZE + 1 && idy < SIZE + 1){
int idx = idy*(SIZE + 1) + idx_temp;
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 9: 2D grid + 1 square kernel + 1 smart diamond kernel. */
int version_9(bool print, int block_size) {
printf("Version 9: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex)\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
hipMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
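/* a full 2D grid covering roughly the whole SIZE x SIZE map is launched at every level */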
for (int i = SIZE; i>1; i = i / 2){
Square_9 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_9 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_9: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 10.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 10.0:
* 10.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version restructures the diamond kernel so that different threads handle different vertices;
* each diamond thread computes exactly one vertex. (A smarter 2D revision of version 3.)
*/
__global__ void Square_10(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
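/* note: i and j here are the raw 2D thread indices and are not multiplied by rect, which
   appears to place the squares differently from the indexing used in versions 3 and 9 */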
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
__global__ void Diamond_10(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2*squareInRow && idy < 2*squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = idx;
j = idy;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 10: 2D (smarter) + 1 square kernel + 1 smart diamond kernel. */
int version_10(bool print, int block_size) {
printf("Version 10: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
hipMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
start = clock();
for (int i = SIZE; i>1; i = i / 2){
Square_10 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_10 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_10: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 11.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 11.0:
* 11.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version restructures the diamond kernel so that different threads handle different vertices;
* each diamond thread computes exactly one vertex. (A smarter 2D revision of version 6.)
*/
__global__ void Square_11(hiprandState_t* rng, float** hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi][mj] = (hm[i][j] + hm[ni][j] + hm[i][nj] + hm[ni][nj]) / 4 + rand;
__syncthreads();
}
}
__global__ void Diamond_11(hiprandState_t* rng, float** hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2 * squareInRow && idy < 2 * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = idx;
j = idy;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half)][pmj];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half)][pmj];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi][pmj - half];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi][(pmj + half)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* write the averaged height plus the random offset */
hm[pmi][pmj] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 11: 2D (smarter) + 1 square kernel + 1 smart diamond kernel. */
int version_11(bool print, int block_size) {
printf("Version 11: square kernel + smart diamond kernel\n");
/* initialize variables */
float **heightMap = new float*[SIZE + 1];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<SIZE + 1; i++){
heightMap[i] = new float[SIZE + 1];
for (int j = 0; j<SIZE + 1; j++){
heightMap[i][j] = 0.0;
}
}
/* set height for corner */
heightMap[0][0] = 1;
printf("heightMap_corner0: %f\n", heightMap[0][0]);
heightMap[SIZE][0] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE][0]);
heightMap[0][SIZE] = 3;
printf("heightMap_corner3: %f\n", heightMap[0][SIZE]);
heightMap[SIZE][SIZE] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE][SIZE]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
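/* caution: heightMap is a host array of row pointers (float**), while dev_heightMap is a flat
   float* buffer; copying N floats from the pointer table and then casting the device buffer to
   float** in the kernels does not yield a valid 2D device array -- a flat layout (as in the
   other versions) or a device-side array of row pointers would be needed */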
start = clock();
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
for (int i = SIZE; i>1; i = i / 2){
Square_11 << <DimGrid, DimBlock >> >(rng, (float**)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_11 << <DimGrid, DimBlock >> >(rng, (float**)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
// if (print){
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// }
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_11: %0.20f\n", runTime);
for (int i = 0; i<SIZE + 1; i++)
delete[] heightMap[i];
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 12.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 12.0:
* 12.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version restructures the diamond kernel so that different threads handle different vertices;
* each diamond thread computes exactly one vertex. (A smarter 2D revision of version 3.)
*/
__global__ void Square_12(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
__global__ void Diamond_12(hiprandState_t* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2 * squareInRow && idy < 2 * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = idx;
j = idy;
/* select which of the four diamond vertices (bottom/left/right/top) this thread updates, using idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t state = rng[idx];
float random = v1 + (v2 - v1) * (float)hiprand_uniform(&state);
rng[idx] = state;
/* write the averaged height plus the random offset */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 12: 2D (smarter, per-level grid) + 1 square kernel + 1 smart diamond kernel. */
int version_12(bool print, int block_size) {
printf("Version 12: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
hipMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
/* run kernel */
start = clock();
for (int i = SIZE; i>1; i = i / 2){
int size_need = SIZE / i;
dim3 DimGrid(ceil(((float)size_need) / block_size), ceil(((float)size_need) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
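/* the grid is re-sized each level, so roughly size_need x size_need threads are launched instead of the full map */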
Square_12 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
Diamond_12 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_12: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 100.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 100.0:
* 100.0 Less Kernel Version: 1 * combined square and simple diamond kernel (1 thread => 4 vertices);
* This version merges the square step and the four diamond updates into one kernel, so each
* thread has to compute four vertices. (Derived from version 4.)
*/
/* combined square + diamond kernel: each thread computes one square center and its four diamond points */
__global__ void Square_Diamond_100(hiprandState_t* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTile[256];
subTile[idx % 256] = hm[idx];
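/* note: subTile is written here but never read afterwards, and the rect parameter is shadowed
   by the loop variable below, so this shared-memory staging currently has no effect */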
for (int rect = SIZE; rect>1; rect = rect / 2){
if (idx < N){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set check value */
check1[idx] = mi;
check2[idx] = mj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
hiprandState_t localState = rng[idx];
float rand = v1 + (v2 - v1) * hiprand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* set check value */
check1[idx] = hm_l;
check2[idx] = hm_l;
/* averaged heights computed above; now add random offsets and write the four diamond vertices back */
/* set random generator */
float rand1 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * hiprand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * hiprand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
}
/* the host code for version 100: 1 combined square + diamond kernel that loops over all levels internally */
int version_100(bool print) {
printf("Version 4: Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex)\n");
/* initialize variables */
float check1[N];
float check2[N];
float heightMap[N];
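/* caution: with SIZE = 4096, each of these stack arrays holds (4097*4097) floats (~64 MB), which
   will overflow a typical stack; the other versions allocate the height map on the heap instead */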
/* initialize device */
float *dev_heightMap;
float *dev_check1;
float *dev_check2;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
hiprandState_t* rng;
/* allocate memory for device */
hipMalloc(&rng, N * sizeof(hiprandState_t));
hipMalloc((void**)&dev_heightMap, N * sizeof(float));
hipMalloc((void**)&dev_check1, N * sizeof(float));
hipMalloc((void**)&dev_check2, N * sizeof(float));
/* memory copy from host to device */
hipMemcpy(dev_heightMap, heightMap, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_check1, check1, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_check2, check2, N * sizeof(float), hipMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_Diamond_100 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
hipDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
hipMemcpy(heightMap, dev_heightMap, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(check1, dev_check1, N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(check2, dev_check2, N * sizeof(float), hipMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("%f\n", cpu_time_used);
hipFree(dev_heightMap);
hipFree(dev_check1);
hipFree(dev_check2);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_4: %0.20f\n", runTime);
return EXIT_SUCCESS;
}
| 9e4ac2cd95548a5bdc65b4df6125b855cab7cfa2.cu | /************************************************************
* ECE408 Parallel Programming - Final Project *
* *
* Topic: Terrain Generation *
* Members: Lai,Haoming; Ma,Yunhan; Wang,Bangqi *
* *
************************************************************/
/*
* Terrain Generation:
* Algorithm: Diamond-Square Algorithm.
* Version:
* 0. Serial version: 1 * square loop + 4 * diamond loop;
* 1. Parallel version: 1 * square kernel + 4 * diamond kernels;
* 2. Less Kernel Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
* 3. Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* 4. One Kernel Version: 1 * square_diamond kernel combined; (based on version 2)
* 5. Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device;
* 6. Less Threads Version: 1 * kernel + 1 * square device + 1 * diamond device (only the threads we need are active);
* 7. Less Block Version: 1 * kernel + 1 * square device + 1 * diamond device (only launch the kernel size we need);
* 8. Shared Memory Version: 1 * kernel + 1 * square device + 1 * diamond device (uses shared memory);
* 9. 2D Simple Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex) (based on version 3);
* 10. 2D Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex) (based on version 6);
* 11. 2D Smarter, Less Threads, Less Block Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex) (based on version 10);
*
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
/* Set the parameter */
/* Choose the version to use */
//#define VERSION 0
/* Set the length of each edge. please put power of 2 */
#define SIZE 4096
/* Set number of array */
#define N (SIZE+1)*(SIZE+1)
/* Set the roughness for terrain */
#define ROUGHNESS 4
/* Set the height for each corner */
#define CORNER 0
/* main function for different version */
int version_0(void);
int version_1(int block_size);
int version_2(int block_size);
int version_3(bool print, int block_size);
int version_4(bool print, int block_size);
int version_5(bool print, int block_size);
int version_6(bool print, int block_size);
int version_7(bool print, int block_size);
int version_8(void);
int version_9(bool print, int block_size);
int version_10(bool print, int block_size);
int version_11(bool print, int block_size);
int version_12(bool print, int block_size);
int version_100(bool print);
/* main function */
int main(void){
int VERSION;
int p;
int block_size;
int cont =1;
while (cont == 1){
bool print = false;
printf("what version do you want: ");
scanf("%d", &VERSION);
printf("print? (0/1): ");
scanf("%d", &p);
printf("please define block_size(max = 32): ");
scanf("%d", &block_size);
if (p)
print = true;
switch (VERSION){
/* test version 0 */
case 0:
version_0();
break;
case 1:
/* test version 1 */
version_1(block_size);
break;
case 2:
/* test version 2 */
version_2(block_size);
break;
case 3:
/* test version 3 */
version_3(print, block_size);
break;
case 4:
/* test version 4 */
version_4(print, block_size);
break;
case 5:
/* test version 5 */
version_5(print, block_size);
break;
case 6:
/* test version 6 */
version_6(print, block_size);
break;
case 7:
/* test version 7 */
version_7(print, block_size);
break;
case 8:
/* test version 8 */
version_8();
break;
case 9:
/* test version 9 */
version_9(print, block_size);
break;
case 10:
/* test version 10 */
version_10(print, block_size);
break;
case 11:
			/* test version 11 */
version_11(print, block_size);
break;
case 12:
			/* test version 12 */
version_12(print, block_size);
break;
case 100:
			/* test version 100 */
version_100(print);
break;
default:
/* test version 0 */
version_0();
return 0;
}
printf("done. Continue(1=continue, other#=exit)?");
scanf("%d", &cont);
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 0.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 0.0:
* 0.0 Serial version: 1 * square loop + 4 * loop;
*
*/
/* host code for serial version */
int version_0(void) {
clock_t start, end;
double runTime;
float **heightMap = new float*[SIZE + 1];
for (int i = 0; i<SIZE + 1; i++)
heightMap[i] = new float[SIZE + 1];
for (int i = 0; i<SIZE + 1; i++){
for (int j = 0; j<SIZE + 1; j++){
heightMap[i][j] = 0.0;
}
}
	//initialize the four corner points
heightMap[0][0] = 0;
heightMap[SIZE][0] = 0;
heightMap[0][SIZE] = 0;
heightMap[SIZE][SIZE] = 0;
srand(time(NULL));
start = clock();
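	/* One serial pass, sketched for a single square of side `stride` with lower-left corner (x, y):
	 * the square step writes the centre (x+stride/2, y+stride/2) as the average of the four corners
	 * plus noise scaled by stride/128; the diamond step then writes each of the four edge midpoints
	 * as (corner average + the two adjacent corners) / 3 plus the same scaled noise. Note that all
	 * five writes of one square share a single random draw `guess`. */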
int stride = SIZE;
while (stride >= 2){
for (int i = 0; i<(SIZE / stride); i++){
for (int j = 0; j<(SIZE / stride); j++){
int leftbottom_x = i* stride;
int leftbottom_y = j* stride;
float average = heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y + stride] + heightMap[leftbottom_x + stride][leftbottom_y + stride];
average = average / 4;
float guess = ((float)((rand() % 200) - 100) / 50);
// printf("%1.5f\n", guess);
heightMap[leftbottom_x + stride / 2][leftbottom_y + stride / 2] = average + ((float)stride/128)*guess;
heightMap[leftbottom_x + stride / 2][leftbottom_y] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x][leftbottom_y + stride / 2] = (average + heightMap[leftbottom_x][leftbottom_y] + heightMap[leftbottom_x][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x + stride][leftbottom_y + stride / 2] = (average + heightMap[leftbottom_x + stride][leftbottom_y] + heightMap[leftbottom_x + stride][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
heightMap[leftbottom_x + stride / 2][leftbottom_y + stride] = (average + heightMap[leftbottom_x][leftbottom_y + stride] + heightMap[leftbottom_x + stride][leftbottom_y + stride]) / 3 + ((float)stride / 128)*guess;
}
}
printf("%d \n", stride);
stride = stride / 2;
}
end = clock();
runTime = (double)(end - start) / CLOCKS_PER_SEC;
// for (int i=0; i<=SIZE; i++){
// for(int j=0; j<=SIZE; j++){
// printf("%d: x = %d, y = %d; hm = %f\n", i*j, i, j, heightMap[i][j]);
// }
// }
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i][j]);
}
}
fclose(fp_out);
printf("Run time for Version_0: %f\n", runTime);
printf("Version 0\n");
for (int i = SIZE; i >= 0; i--)
delete[] heightMap[i];
delete[] heightMap;
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 1.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 1.0:
* 1.0 Parallel version: 1 * square kernel + 4 * diamond kernel;
 * This parallel function parallelizes the serial code directly: it turns the single square loop into
 * one square kernel and the four diamond updates into four separate diamond kernels.
*/
/* setup random number*/
__global__ void setseed(curandState * state, unsigned long seed)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, id, 0, &state[id]);
}
__global__ void generate(float* random, curandState* globalState, int n)
{
// generate random numbers
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<n)
{
curandState localState = globalState[idx];
float RANDOM = curand_uniform(&localState);
globalState[idx] = localState;
random[idx] = RANDOM;
}
}
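/* Typical host-side setup for these two helpers; it is kept commented out here, exactly as in the
 * commented-out calls inside the host code further below:
 *
 *	setseed << < ceil((float)N / 256), 256 >> > (rng, unsigned(time(NULL)));
 *	float* rand;
 *	cudaMalloc((void**)&rand, N*sizeof(float));
 *	generate << <ceil((float)N / 256), 256 >> > (rand, rng, N);
 */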
/* square kernel to calculate the middle point */
__global__ void Square_1(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
}
}
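/* Index-mapping sanity check for Square_1 (worked example, not exhaustive): with rect = SIZE/2 there
 * are squareInRow = 2 squares per row, so threads 0..3 map to the lower-left corners
 * (i, j) = (0, 0), (SIZE/2, 0), (0, SIZE/2) and (SIZE/2, SIZE/2), and each writes the centre of its
 * square at (i + rect/2, j + rect/2); higher thread ids wrap back onto the same squares via % SIZE. */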
/* diamond kernel 1_1 to calculate the middle bottom point */
__global__ void Diamond_1_1(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, mi, j;
int pmi_b, pmj_b;
float hm_b;
int num_b;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
mi = i + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
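/* Boundary handling in the diamond kernels: a diamond vertex that sits on the map edge has only three
 * in-range neighbours, so the counter starts at 3 and is bumped to 4 only when the fourth neighbour
 * (here, the one at pmj_b - half) actually lies inside the grid. */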
/* diamond kernel 1_2 to calculate the left point */
__global__ void Diamond_1_2(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, mj;
int pmi_l, pmj_l;
float hm_l;
int num_l;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
mj = j + half;
/* find 4 diamond vertex */
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* set the value */
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_3 to calculate the right point */
__global__ void Diamond_1_3(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, mj;
int pmi_r, pmj_r;
float hm_r;
int num_r;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
mj = j + half;
/* find 4 diamond vertex */
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* set the value */
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* get height for */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* diamond kernel 1_4 to calculate the middle top point */
__global__ void Diamond_1_4(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, mi, nj;
int pmi_t, pmj_t;
float hm_t;
int num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
nj = j + rect;
mi = i + half;
/* find 4 diamond vertex */
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* get height for */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* host code for version 1.0 */
int version_1(int block_size) {
printf("Version 1: square kernel + 4 diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_1_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_1_2 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_1_3 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_1_4 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_1: %f\n", runTime);
/* FILE *fp_out;
fp_out = fopen("vertex.txt", "w");
for (int i = 0; i<N; i++)
fprintf(fp_out, "%d %d %f\n", i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
fclose(fp_out);
*/
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 2.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 2.0:
 * 2.0 Less Kernel Version: 1 * square kernel + 1 * simple diamond kernel (1 thread => 4 vertices);
 * This version merges the four diamond kernels into one single kernel; as a result, each thread in the
 * diamond kernel has to compute four vertices.
*/
/* combined diamond kernel to calculate the four point in each thread */
__global__ void Diamond_2(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* get height for */
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 2: 1 square kernel + 1 stupid diamond kernel */
int version_2(int block_size) {
printf("Version 2: square kernel + stupid diamond kernel\n");
/* initialize variables */
float *heightMap= new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_2 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_2: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 3.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 3.0:
 * 3.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel to use different threads for different vertices: each
 * thread in the diamond kernel only needs to compute one vertex.
*/
/* smart diamond kernel calculate the diamond vertex and each thread only calculate one vertex */
__global__ void Diamond_3(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
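/* How the tid -> vertex mapping above plays out (worked example): tid = idx / (squareInRow^2) % 4
 * selects one of the four diamond vertices of the square with lower-left corner (i, j):
 *	tid 0 -> (i + half, j)         bottom midpoint
 *	tid 1 -> (i, j + half)         left midpoint
 *	tid 2 -> (i + rect, j + half)  right midpoint
 *	tid 3 -> (i + half, j + rect)  top midpoint
 */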
/* the host code for version 3: 1 square kernel + 1 smart diamond kernel */
int version_3(bool print, int block_size) {
printf("Version 3: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
int size = block_size * block_size;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_1 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_3 << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_3: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 4.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 4.0:
 * 4.0 One Kernel Version: 1 * combined square_diamond kernel (1 thread => 1 square midpoint + 4 diamond vertices);
 * This version fuses the square step and the diamond step of version 2 into a single kernel, so each
 * thread computes its square's midpoint and then the four surrounding diamond vertices.
*/
/* combined diamond kernel to calculate the four point in each thread */
__global__ void Square_Diamond_4(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* get height for */
/* set random generator */
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 4: 1 combined square_diamond kernel */
int version_4(bool print, int block_size) {
printf("Version 4: Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex)\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_Diamond_4 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
	runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_4: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 5.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 5.0:
* Version 5.0: Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device;
 * This version defines the square and diamond steps as two device functions and launches a single
 * kernel that loops over both steps for every stride.
*/
/* smart diamond kernel calculate the diamond vertex and each thread only calculate one vertex */
__device__ void Square_5(curandState* rng, float* hm, int rect)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
}
}
__device__ void Diamond_5(curandState* rng, float* hm, int rect)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
__global__ void Square_Diamond_5(curandState* rng, float* hm)
{
for (int i = SIZE; i > 1; i = i / 2)
{
__syncthreads();
Square_5(rng, hm, i);
Diamond_5(rng, hm, i);
}
}
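/* Note on this single-launch approach: __syncthreads() inside Square_5 / Diamond_5 is a block-level
 * barrier, so within one launch of Square_Diamond_5 there is no grid-wide barrier between the square
 * and diamond steps of a pass; the entire stride loop runs inside a single kernel launch. */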
int version_5(bool print, int block_size) {
printf("Version 5.0: Kernel Device Version: 1 * kernel + 1 * square device + 1 * diamond device; \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
Square_Diamond_5 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap);
cudaDeviceSynchronize();
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_5: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 6.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 6.0:
 * 6. Less Threads Version: 1 * square kernel + 1 * smart diamond kernel (only the threads we need do work);
 * This version keeps one thread per vertex, but each kernel is guarded so that only the threads needed
 * at the current stride actually update the height map.
*/
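/* Active-thread arithmetic for this version (illustrative numbers for the default SIZE = 4096):
 * at stride rect there are squareInRow = SIZE/rect squares per row, so Square_6 lets only
 * squareInRow^2 threads write and Diamond_6 only 4*squareInRow^2; e.g. at rect = 1024 that is
 * 16 and 64 active threads, even though roughly N threads are still launched each pass. */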
/* square kernel to calculate the middle point */
__global__ void Square_6(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set check value */
// check1[idx] = mi;
// check2[idx] = mj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* smart diamond kernel calculate the diamond vertex and each thread only calculate one vertex */
__global__ void Diamond_6(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set check value */
// check1[idx] = pmi;
// check2[idx] = pmj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 6: 1 square kernel + 1 smart diamond kernel */
int version_6(bool print, int block_size) {
printf("Version 6: square kernel + smart diamond kernel (only active needed threads) \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
// float *dev_check1;
// float *dev_check2;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_check1, check1, N * sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(dev_check2, check2, N * sizeof(float), cudaMemcpyHostToDevice);
/*
setseed << < ceil((float)N / 256), 256 >> > (rng, unsigned(time(NULL)));
float* rand;
cudaMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / 256), 256 >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_6 << <ceil((float)N / (block_size*block_size)), block_size*block_size >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_6 << <ceil((float)N / (block_size*block_size)), block_size*block_size >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
// cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
// cudaFree(dev_check1);
// cudaFree(dev_check2);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_6: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 7.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 7.0:
 * 7. Less Block Version: 1 * square kernel + 1 * smart diamond kernel (only launch the threads and blocks we need);
 * This version goes one step further than version 6: the host computes how many vertices are active
 * at the current stride and launches only that many threads (in 32-thread blocks) instead of roughly N of them.
*/
/* square kernel to calculate the middle point */
__global__ void Square_7(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* smart diamond kernel calculate the diamond vertex and each thread only calculate one vertex */
__global__ void Diamond_7(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int squareInRow = SIZE / rect;
if (idx < 4 * squareInRow * squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 7: 1 square kernel + 1 smart diamond kernel */
int version_7(bool print, int block_size) {
printf("Version 7: square kernel + smart diamond kernel (only active needed threads&kernel) \n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / 256), 256 >> > (rng, unsigned(time(NULL)));
float* rand;
cudaMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / 256), 256 >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
int squareInRow = SIZE / i;
int size_need = squareInRow * squareInRow;
int size_need2 = 4 * size_need;
Square_7 << <ceil((float)size_need / 32), 32 >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_7 << <ceil((float)size_need2 / 32), 32 >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
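	/* Grid-size arithmetic for the loop above (for the default SIZE = 4096): at the first pass
	 * (i = SIZE) size_need is 1, so a single 32-thread block is launched per kernel; at the last
	 * pass (i = 2) size_need = 2048*2048 = 4,194,304 and size_need2 = 16,777,216, i.e. 131,072
	 * and 524,288 blocks of 32 threads respectively. */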
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<N; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_7: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
int version_8(void) {
printf("8\n");
return 0;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 9.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 9.0:
 * 9.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel so that different threads handle different vertices: each
 * thread in the diamond kernel only needs to compute one vertex. (A simple 2D revision of version 3.)
*/
__global__ void Square_9(curandState* rng, float* hm, int rect){
/* set idx */
int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_temp < SIZE + 1 && idy < SIZE + 1){
int idx = idy*(SIZE + 1) + idx_temp;
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
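/* 2D indexing note for Square_9 / Diamond_9: the (x, y) thread coordinates are flattened with
 * idx = idy*(SIZE+1) + idx_temp, so e.g. thread (5, 3) gets idx = 3*4097 + 5 = 12296 for the
 * default SIZE = 4096; the rest of the index math is unchanged from the 1D version 3. */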
__global__ void Diamond_9(curandState* rng, float* hm, int rect){
/* set idx */
int idx_temp = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idx_temp < SIZE + 1 && idy < SIZE + 1){
int idx = idy*(SIZE + 1) + idx_temp;
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow*squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 9: 2D + 1 square kernel + 1 smart diamond kernel. */
int version_9(bool print, int block_size) {
printf("Version 9: 1 * sqaure kernel + 1 * smart diamond kernel (1 thread => 1 vertex)\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
cudaMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
start = clock();
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
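	/* Launch-configuration arithmetic (for the default SIZE = 4096 with block_size = 32):
	 * DimGrid is ceil(4096/32) = 128 blocks in each dimension, i.e. 128*128 = 16,384 blocks of
	 * 32*32 = 1024 threads, or 16,777,216 threads per kernel launch. */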
for (int i = SIZE; i>1; i = i / 2){
Square_9 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_9 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_9: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 10.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 10.0:
 * 10.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel so that different threads handle different vertices: each
 * thread in the diamond kernel only needs to compute one vertex. (A smarter 2D revision of version 3.)
*/
__global__ void Square_10(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
__global__ void Diamond_10(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2*squareInRow && idy < 2*squareInRow){
		/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = idx;
j = idy;
		/* Calculate the diamond vertex from idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* get height for */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 10: 2D (smarter) + 1 square kernel + 1 smart diamond kernel. */
int version_10(bool print, int block_size) {
printf("Version 10: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
cudaMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
start = clock();
for (int i = SIZE; i>1; i = i / 2){
Square_10 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_10 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_10: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 11.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 11.0:
 * 11.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
 * This version restructures the diamond kernel so that different threads handle different vertices: each
 * thread in the diamond kernel only needs to compute one vertex. (A smarter 2D revision of version 6.)
*/
__global__ void Square_11(curandState* rng, float** hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
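/* (i, j)..(ni, nj) are the corner indices of this thread's square; (mi, mj) is its centre */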
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi][mj] = (hm[i ][j] + hm[ni][j] + hm[i][nj] + hm[ni][nj]) / 4 + rand;
__syncthreads();
}
}
__global__ void Diamond_11(curandState* rng, float** hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2 * squareInRow && idy < 2 * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = idx;
j = idy;
/* Calculate the diamond vertex using idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
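/* tid cycles 0..3 and maps this thread to the bottom, left, right, or top edge midpoint of the square anchored at (i, j) */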
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half)][pmj];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half)][pmj];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi] [(pmj - half)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi][(pmj + half)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
/* get height for the diamond vertex */
hm[pmi][pmj] = hm_p / num_p + rand;
rng[idx] = localState;
__syncthreads();
}
}
/* the host code for version 11: 2D(more smart) + 1 square kernel + 1 smart diamond kernel. */
int version_11(bool print, int block_size) {
printf("Version 11: square kernel + smart diamond kernel\n");
/* initialize variables */
float **heightMap = new float*[SIZE + 1];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<SIZE + 1; i++){
heightMap[i] = new float[SIZE + 1];
for (int j = 0; j<SIZE + 1; j++){
heightMap[i][j] = 0.0;
}
}
/* set height for corner */
heightMap[0][0] = 1;
printf("heightMap_corner0: %f\n", heightMap[0][0]);
heightMap[SIZE][0] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE][0]);
heightMap[0][SIZE] = 3;
printf("heightMap_corner3: %f\n", heightMap[0][SIZE]);
heightMap[SIZE][SIZE] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE][SIZE]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
dim3 DimGrid(ceil(((float)SIZE) / block_size), ceil(((float)SIZE) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
for (int i = SIZE; i>1; i = i / 2){
Square_11 << <DimGrid, DimBlock >> >(rng, (float**)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_11 << <DimGrid, DimBlock >> >(rng, (float**)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
// if (print){
// for (int i = 0; i<N; i++){
// printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
// }
// }
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_11: %0.20f\n", runTime);
for (int i = 0; i<SIZE + 1; i++)
delete[] heightMap[i];
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 12.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 12.0:
* 12.0 Smarter Kernel Version: 1 * square kernel + 1 * smart diamond kernel (1 thread => 1 vertex);
* This version reconstructs the diamond kernel to use different threads for different vertices. Each
* thread in the diamond kernel only needs to calculate one vertex. (A smart revised 2D version of version 3)
*/
__global__ void Square_12(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < squareInRow && idy < squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
/* calculate vertex */
i = idx;
j = idy;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* set height map */
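/* the random offset is scaled by rect/SIZE so the noise amplitude shrinks at each finer subdivision level */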
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
__global__ void Diamond_12(curandState* rng, float* hm, int rect){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int squareInRow = SIZE / rect;
if (idx < 2 * squareInRow && idy < 2 * squareInRow){
/* initialize variables */
int half = rect / 2;
int i, j;
int pmi, pmj;
float hm_p;
int num_p;
/* calculate vertex */
i = idx;
j = idy;
/* Calculate the diamond vertex using idx */
int tid = idx / (squareInRow) % 4;
pmi = i + (1 - tid % 2)*half + tid / 2 * half;
pmj = j + tid % 2 * half + tid / 2 * half;
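/* tid cycles 0..3 and maps this thread to the bottom, left, right, or top edge midpoint of the square anchored at (i, j) */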
/* Set the value */
hm_p = 0;
num_p = 0;
if (pmi - half >= 0){
hm_p += hm[(pmi - half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmi + half <= SIZE){
hm_p += hm[(pmi + half) + pmj*(SIZE + 1)];
num_p++;
}
if (pmj - half >= 0){
hm_p += hm[pmi + (pmj - half)*(SIZE + 1)];
num_p++;
}
if (pmj + half <= SIZE){
hm_p += hm[pmi + (pmj + half)*(SIZE + 1)];
num_p++;
}
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState state = rng[idx];
float random = v1 + (v2 - v1) * (float)curand_uniform(&state);
rng[idx] = state;
/* get height for the diamond vertex */
hm[pmi + pmj*(SIZE + 1)] = hm_p / num_p + random*((float)rect / (float)SIZE);
__syncthreads();
}
}
/* the host code for version 12: 2D(more smart) + 1 square kernel + 1 smart diamond kernel. */
int version_12(bool print, int block_size) {
printf("Version 12: square kernel + smart diamond kernel\n");
/* initialize variables */
float *heightMap = new float[N];
/* initialize device */
float *dev_heightMap;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = 1;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = 2;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = 3;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = 4;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
/*set up random number*/
/*
setseed << < ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rng, unsigned(time(NULL)));
float* rand;
cudaMalloc((void**)&rand, N*sizeof(float));
generate << <ceil((float)N / (block_size*block_size)), (block_size*block_size) >> > (rand, rng, N);
*/
/* run kernel */
start = clock();
for (int i = SIZE; i>1; i = i / 2){
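/* size the grid to the number of squares per row at this level: SIZE / i */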
int size_need = SIZE / i;
dim3 DimGrid(ceil(((float)size_need) / block_size), ceil(((float)size_need) / block_size), 1);
dim3 DimBlock(block_size, block_size, 1);
Square_12 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
Diamond_12 << <DimGrid, DimBlock >> >(rng, (float*)dev_heightMap, i);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
FILE *fp_out;
float index_x;
float index_y;
fp_out = fopen("vertex.txt", "w");
for (int j = 0; j<SIZE + 1; j++){
for (int i = 0; i<SIZE + 1; i++){
index_x = (float)i / (SIZE / 2) - 1;
index_y = (float)j / (SIZE / 2) - 1;
fprintf(fp_out, "%f %f %f\n", index_x, index_y, heightMap[i + j*(SIZE + 1)]);
}
}
fclose(fp_out);
}
// printf("\n");
// for (int i=0; i<SIZE+1; i++){
// printf("%d: pmi = %f, pmj = %f\n", i, check1[i], check2[i]);
// }
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_12: %0.20f\n", runTime);
delete[] heightMap;
return EXIT_SUCCESS;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////// VERSION 100.0 ////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////
/* VERSION 100.0:
* 100.0 Less Kernel Version: 1 * square kernel and simple diamond kernel (1 thread => 4 vertices);
* This version combines the four diamond kernels into one single kernel. However, each thread in the diamond
* kernel needs to calculate four vertices. (from version 4)
*/
/* combined diamond kernel to calculate the four point in each thread */
__global__ void Square_Diamond_100(curandState* rng, float* hm, int rect, float* check1, float* check2){
/* set idx */
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float subTile[256];
subTile[idx % 256] = hm[idx];
for (int rect = SIZE; rect>1; rect = rect / 2){
if (idx < N){
/* initialize variables */
int half = rect / 2;
int i, j, ni, nj, mi, mj;
int pmi_b, pmj_b, pmi_l, pmj_l, pmi_r, pmj_r, pmi_t, pmj_t;
float hm_b, hm_l, hm_r, hm_t;
int num_b, num_l, num_r, num_t;
int squareInRow = SIZE / rect;
/* calculate vertex */
i = (idx%squareInRow*rect) % SIZE;
j = (idx / squareInRow*rect) % SIZE;
ni = i + rect;
nj = j + rect;
mi = i + half;
mj = j + half;
/* set check value */
check1[idx] = mi;
check2[idx] = mj;
/* set random generator */
float v1 = (0.0f - (float)ROUGHNESS) / 2;
float v2 = ((float)ROUGHNESS) / 2;
curandState localState = rng[idx];
float rand = v1 + (v2 - v1) * curand_uniform(&localState);
rng[idx] = localState;
/* set height map */
hm[mi + mj*(SIZE + 1)] = (hm[i + j*(SIZE + 1)] + hm[ni + j*(SIZE + 1)] + hm[i + nj*(SIZE + 1)] + hm[ni + nj*(SIZE + 1)]) / 4 + rand;
__syncthreads();
/* find 4 diamond vertex */
/* bottom vertex */
pmi_b = mi;
pmj_b = j;
/* left vertex */
pmi_l = i;
pmj_l = mj;
/* right vertex */
pmi_r = ni;
pmj_r = mj;
/* top vertex */
pmi_t = mi;
pmj_t = nj;
/* set the value */
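/* each diamond vertex averages its neighbours at distance half; a vertex on the map border has only three in-range neighbours, so num_* stays at 3 */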
/* bottom height */
hm_b = 0;
num_b = 3;
if (pmj_b - half >= 0){
hm_b += hm[pmi_b + (pmj_b - half)*(SIZE + 1)];
num_b = 4;
}
hm_b += hm[pmi_b + (pmj_b + half)*(SIZE + 1)];
hm_b += hm[(pmi_b - half) + pmj_b*(SIZE + 1)];
hm_b += hm[(pmi_b + half) + pmj_b*(SIZE + 1)];
/* left height */
hm_l = 0;
num_l = 3;
if (pmi_l - half >= 0){
hm_l += hm[(pmi_l - half) + pmj_l*(SIZE + 1)];
num_l = 4;
}
hm_l += hm[(pmi_l + half) + pmj_l*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l - half)*(SIZE + 1)];
hm_l += hm[pmi_l + (pmj_l + half)*(SIZE + 1)];
/* right height */
hm_r = 0;
num_r = 3;
if (pmi_r + half <= SIZE){
hm_r += hm[(pmi_r + half) + pmj_r*(SIZE + 1)];
num_r = 4;
}
hm_r += hm[(pmi_r - half) + pmj_r*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r - half)*(SIZE + 1)];
hm_r += hm[pmi_r + (pmj_r + half)*(SIZE + 1)];
/* top height */
hm_t = 0;
num_t = 3;
if (pmj_t + half <= SIZE){
hm_t += hm[pmi_t + (pmj_t + half)*(SIZE + 1)];
num_t = 4;
}
hm_t += hm[pmi_t + (pmj_t - half)*(SIZE + 1)];
hm_t += hm[(pmi_t - half) + pmj_t*(SIZE + 1)];
hm_t += hm[(pmi_t + half) + pmj_t*(SIZE + 1)];
/* set check value */
check1[idx] = hm_l;
check2[idx] = hm_l;
/* get height for the four diamond vertices */
/* set random generator */
float rand1 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand2 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand3 = v1 + (v2 - v1) * curand_uniform(&localState);
float rand4 = v1 + (v2 - v1) * curand_uniform(&localState);
/* set height map */
hm[pmi_b + pmj_b*(SIZE + 1)] = hm_b / num_b + rand1;
hm[pmi_l + pmj_l*(SIZE + 1)] = hm_l / num_l + rand2;
hm[pmi_r + pmj_r*(SIZE + 1)] = hm_r / num_r + rand3;
hm[pmi_t + pmj_t*(SIZE + 1)] = hm_t / num_t + rand4;
// hm[mi + mj*(SIZE+1)] = (hm[i + j*(SIZE+1)] + hm[ni + j*(SIZE+1)] + hm[i + nj*(SIZE+1)] + hm[ni + nj*(SIZE+1)])/4 + rand;
rng[idx] = localState;
__syncthreads();
}
}
}
/* the host code for version 100: 1 combined square + diamond kernel (1 thread => 4 vertices) */
int version_100(bool print) {
printf("Version 4: Less Kernel Version: 1 * square kernal + 1 * simple diamond kernel (1 thread => 4 vertex)\n");
/* initialize variables */
float check1[N];
float check2[N];
float heightMap[N];
/* initialize device */
float *dev_heightMap;
float *dev_check1;
float *dev_check2;
/* initialize time*/
clock_t start, end;
double runTime;
/* initial height map */
for (int i = 0; i<N; i++){
heightMap[i] = 0;
}
/* set height for corner */
heightMap[0 + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner0: %f\n", heightMap[0 + 0 * (SIZE + 1)]);
heightMap[SIZE + 0 * (SIZE + 1)] = CORNER;
printf("heightMap_corner1: %f\n", heightMap[SIZE + 0 * (SIZE + 1)]);
heightMap[0 + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner3: %f\n", heightMap[0 + SIZE * (SIZE + 1)]);
heightMap[SIZE + SIZE * (SIZE + 1)] = CORNER;
printf("heightMap_corner2: %f\n", heightMap[SIZE + SIZE * (SIZE + 1)]);
curandState* rng;
/* allocate memory for device */
cudaMalloc(&rng, N * sizeof(curandState));
cudaMalloc((void**)&dev_heightMap, N * sizeof(float));
cudaMalloc((void**)&dev_check1, N * sizeof(float));
cudaMalloc((void**)&dev_check2, N * sizeof(float));
/* memory copy from host to device */
cudaMemcpy(dev_heightMap, heightMap, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_check1, check1, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_check2, check2, N * sizeof(float), cudaMemcpyHostToDevice);
start = clock();
/* run kernel */
for (int i = SIZE; i>1; i = i / 2){
Square_Diamond_100 << <ceil((float)N / 256), 256 >> >(rng, (float*)dev_heightMap, i, dev_check1, dev_check2);
cudaDeviceSynchronize();
}
end = clock();
/* memory copy from device to host*/
cudaMemcpy(heightMap, dev_heightMap, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(check1, dev_check1, N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(check2, dev_check2, N * sizeof(float), cudaMemcpyDeviceToHost);
/* print the output */
if (print){
for (int i = 0; i<N; i++){
printf("%d: x = %d, y = %d; hm = %f\n", i, i % (SIZE + 1), i / (SIZE + 1), heightMap[i]);
}
}
// printf("%f\n", cpu_time_used);
cudaFree(dev_heightMap);
cudaFree(dev_check1);
cudaFree(dev_check2);
runTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Run time for Version_4: %0.20f\n", runTime);
return EXIT_SUCCESS;
}
|
470745178ad9f3885688deccdf6c158e49f5ce43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello_GPU(int index){
printf("Hello from GPU%d[%d]!\n", index, threadIdx.x);
}
int main(void) {
printf("Hello from CPU!\n");
hipLaunchKernelGGL(( hello_GPU), dim3(1),dim3(4), 0, 0, 1);
hipLaunchKernelGGL(( hello_GPU), dim3(1),dim3(6), 0, 0, 2);
hipDeviceSynchronize();
return 0;
}
| 470745178ad9f3885688deccdf6c158e49f5ce43.cu | #include <stdio.h>
__global__ void hello_GPU(int index){
printf("Hello from GPU%d[%d]!\n", index, threadIdx.x);
}
int main(void) {
printf("Hello from CPU!\n");
hello_GPU<<<1,4>>>(1);
hello_GPU<<<1,6>>>(2);
cudaDeviceSynchronize();
return 0;
}
|
6efd80442c340701bc6216eb4eb681baff0fab79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaSuperpixel.h"
#include "hipcub/hipcub.hpp"
#include <iostream>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include "GpuTimer.h"
texture<float4> ImageTexture;
__global__ void InitClusterCentersKernel( float4* floatBuffer, int* labels, int nWidth, int nHeight,int step, int nSegs, SLICClusterCenter* vSLICCenterList )
{
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nSegs)
return;
int offsetBlock = (blockIdx.x *nWidth + threadIdx.x )* step;
float2 avXY;
avXY.x=threadIdx.x*step + (float)step/2.0;
avXY.y=blockIdx.x*step + (float)step/2.0;
float4 tmp;
tmp.x = 0;
tmp.y =0;
tmp.z = 0;
for(int i=0; i<step; i++)
{
for(int j=0; j<step; j++)
{
if ((threadIdx.x )* step + i >= nWidth ||
(blockIdx.x)*step + j>= nHeight)
continue;
int idx = offsetBlock + i + j*nWidth;
float4 pixel = tex1Dfetch(ImageTexture,idx);
tmp.x += pixel.x;
tmp.y += pixel.y;
tmp.z += pixel.z;
labels[idx] = clusterIdx;
}
}
double sz = step * step;
tmp.x = tmp.x / sz;
tmp.y = tmp.y /sz;
tmp.z = tmp.z/sz;
vSLICCenterList[clusterIdx].rgb= tmp;
vSLICCenterList[clusterIdx].xy=avXY;
vSLICCenterList[clusterIdx].nPoints= (int)sz;
}
__global__ void UpdateClustersKernel(int nHeight, int nWidth, int* keys,SLICClusterCenter* d_inCenters,SLICClusterCenter* d_outCenters, int nClusters,int tNClusters )
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters)
return;
d_outCenters[keys[clusterIdx]] = d_inCenters[clusterIdx] * (1.0/d_inCenters[clusterIdx].nPoints);
}
__global__ void UpdateClusterCenterKernel(int heigth,int width, int step, int* d_labels, SLICClusterCenter* d_inCenters,int nClusters)
{
const int size = 5*5;
__shared__ int sLabels[size];
__shared__ float4 sPixels[size];
__shared__ float4 Rgb[size];
__shared__ float2 XY[size];
__shared__ int flag[size];
int clusterIdx = blockIdx.x;
int cx = d_inCenters[clusterIdx].xy.x;
int cy = d_inCenters[clusterIdx].xy.y;
}
__global__ void UpdateClusterCenterKernel(float4* imgBuffer, int height, int width, int step,int * d_labels, SLICClusterCenter* d_inCenters,int nClusters)
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters)
return;
int k = d_inCenters[clusterIdx].xy.x;
int j = d_inCenters[clusterIdx].xy.y;
float4 crgb = make_float4(0,0,0,0);
float2 cxy = make_float2(0,0);
int n = 0;
// centred on the original centre point, update using a radius of step + 1
int radius = step;
for (int x = k- radius; x<= k+radius; x++)
{
for(int y = j - radius; y<= j+radius; y++)
{
if (x<0 || x>width-1 || y<0 || y> height-1)
continue;
int idx = x+y*width;
if (d_labels[idx] == clusterIdx)
{
float4 pixel = tex1Dfetch(ImageTexture,idx);
crgb.x += pixel.x;
crgb.y += pixel.y;
crgb.z += pixel.z;
cxy.x += x;
cxy.y += y;
n++;
}
}
}
d_inCenters[clusterIdx].rgb = make_float4(crgb.x/n,crgb.y/n,crgb.z/n,0);
d_inCenters[clusterIdx].xy = make_float2(cxy.x/n,cxy.y/n);
d_inCenters[clusterIdx].nPoints = n;
}
__global__ void InitClustersKernel(float4* imgBuffer, int nHeight, int nWidth, SLICClusterCenter* d_ceneters)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int idx = k+j*nWidth;
if (idx <nHeight*nWidth)
{
d_ceneters[idx].xy.x = k;
d_ceneters[idx].xy.y = j;
d_ceneters[idx].rgb = imgBuffer[idx];
d_ceneters[idx].nPoints = 1;
}
}
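/* SLIC-style distance between pixel (x, y) and cluster "label": an RGB colour distance plus a squared
   spatial distance, blended by alpha, with the spatial part divided by the search radius. */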
__device__ double distance(int x, int y, float4* imgBuffer,int width, int height, float alpha, float radius, int label, SLICClusterCenter* d_ceneters )
{
int idx = x + y*width;
float4 pixel = tex1Dfetch(ImageTexture,idx);
double dr = (pixel.x - d_ceneters[label].rgb.x);
double dg = (pixel.y - d_ceneters[label].rgb.y) ;
double db = (pixel.z - d_ceneters[label].rgb.z);
double d_rgb = sqrt(dr*dr + dg*dg + db*db);
double dx = (x*1.f - d_ceneters[label].xy.x);
double dy = (y*1.f - d_ceneters[label].xy.y);
double d_xy = (dx*dx + dy*dy);
return (1-alpha)*d_rgb + alpha*d_xy/(radius);
}
__global__ void UpdateBoundaryKernel(float4* imgBuffer, int nHeight, int nWidth,int* labels,SLICClusterCenter* d_ceneters, int nClusters,float alpha, float radius)
{
int dx4[4] = {-1, 0, 1, 0,};
int dy4[4] = { 0, -1, 0, 1};
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int mainindex = k+j*nWidth;
int np(0);
int nl[4];
for( int i = 0; i < 4; i++ )
{
int x = k + dx4[i];
int y = j + dy4[i];
if( (x >= 0 && x < nWidth) && (y >= 0 && y < nHeight) )
{
int index = y*nWidth + x;
if( labels[mainindex] != labels[index] )
{
nl[np++] = labels[index];
}
}
}
if( np > 1 )//change to 2 or 3 for thinner lines
{
double min = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,labels[mainindex],d_ceneters);
int idx = -1;
for(int i=0; i<np; i++)
{
double dis = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,nl[i],d_ceneters);
if (dis < min)
{
min = dis;
idx = i;
}
}
if (idx >=0)
labels[mainindex] = nl[idx];
}
}
__global__ void AvgClusterCenterKernel(SLICClusterCenter* d_cenetersIn, int nClusters)
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters || (d_cenetersIn[clusterIdx].nPoints ==0))
return;
int n = d_cenetersIn[clusterIdx].nPoints;
d_cenetersIn[clusterIdx].rgb.x /= n;
d_cenetersIn[clusterIdx].rgb.y /= n;
d_cenetersIn[clusterIdx].rgb.z /= n;
d_cenetersIn[clusterIdx].xy.x /= n;
d_cenetersIn[clusterIdx].xy.y /= n;
}
__global__ void UpdateBoundaryKernel(float4* imgBuffer, int nHeight, int nWidth,int* labels,SLICClusterCenter* d_ceneters, SLICClusterCenter* d_centersOut, int nClusters,float alpha, float radius)
{
int dx4[4] = {-1, 0, 1, 0,};
int dy4[4] = { 0, -1, 0, 1};
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int mainindex = k+j*nWidth;
if (mainindex > nHeight*nWidth-1)
return;
int np(0);
int nl[4];
for( int i = 0; i < 4; i++ )
{
int x = k + dx4[i];
int y = j + dy4[i];
if( (x >= 0 && x < nWidth) && (y >= 0 && y < nHeight) )
{
int index = y*nWidth + x;
if( labels[mainindex] != labels[index] )
{
nl[np++] = labels[index];
}
}
}
if( np > 0 )//change to 2 or 3 for thinner lines
{
double min = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,labels[mainindex],d_ceneters);
int idx = -1;
for(int i=0; i<np; i++)
{
double dis = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,nl[i],d_ceneters);
if (dis < min)
{
min = dis;
idx = i;
}
}
if (idx >=0)
labels[mainindex] = nl[idx];
}
/*d_centersOut[labels[mainindex]].nPoints++;
d_centersOut[labels[mainindex]].rgb.x +=imgBuffer[mainindex].x;
d_centersOut[labels[mainindex]].rgb.y +=imgBuffer[mainindex].y;
d_centersOut[labels[mainindex]].rgb.z +=imgBuffer[mainindex].z;
d_centersOut[labels[mainindex]].xy.x +=k;
d_centersOut[labels[mainindex]].xy.y +=j;*/
}
void UpdateClusterCenter(float4* imgBuffer, int height, int width, int step, int * d_labels, SLICClusterCenter* d_inCenters, int nClusters)
{
//GpuTimer timer;
//timer.Start();
dim3 blockDim(128);
dim3 gridDim((nClusters+127)/128);
hipLaunchKernelGGL(( UpdateClusterCenterKernel), dim3(gridDim),dim3(blockDim), 0, 0, imgBuffer, height,width,step,d_labels, d_inCenters,nClusters);
//timer.Stop();
//std::cout<<"UpdateClusterCenter "<<timer.Elapsed()<<std::endl;
}
void InitClusterCenters(float4* d_rgbaBuffer, int* d_labels,int width, int height, int step, int &nSeg, SLICClusterCenter* d_centers)
{
/*GpuTimer timer;
timer.Start();*/
dim3 blockDim = (width+ step-1) / step ;
dim3 gridDim = (height + step -1) / step;
nSeg = blockDim.x * gridDim.x;
hipBindTexture( NULL, ImageTexture,
d_rgbaBuffer,
sizeof(float4)*width*height );
hipLaunchKernelGGL(( InitClusterCentersKernel), dim3(gridDim),dim3(blockDim), 0, 0, d_rgbaBuffer,d_labels,width,height,step,nSeg,d_centers);
/*timer.Stop();
std::cout<<" InitClusterCentersKernel"<<timer.Elapsed()<<std::endl;*/
}
void UpdateBoundary(float4* imgBuffer, int nHeight, int nWidth,int* labels, SLICClusterCenter* d_cenetersIn, SLICClusterCenter* d_centersOut, int nClusters,float alpha, float radius)
{
GpuTimer timer;
timer.Start();
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
hipLaunchKernelGGL(( UpdateBoundaryKernel), dim3(gridDim),dim3(blockDim), 0, 0, imgBuffer,nHeight,nWidth,labels,d_cenetersIn, d_centersOut,nClusters,alpha,radius);
timer.Stop();
std::cout<<"update UpdateBoundary"<<timer.Elapsed()<<std::endl;
}
void AvgClusterCenter(SLICClusterCenter* d_cenetersIn, int nClusters)
{
GpuTimer timer;
timer.Start();
dim3 blockDim(128);
dim3 gridDim((nClusters+127)/128);
hipLaunchKernelGGL(( AvgClusterCenterKernel), dim3(gridDim),dim3(blockDim), 0, 0, d_cenetersIn, nClusters);
timer.Stop();
std::cout<<"avg cluster center "<<timer.Elapsed()<<std::endl;
}
void UpdateBoundary(float4* imgBuffer, int nHeight, int nWidth,int * labels,SLICClusterCenter* d_centers, int nClusters,float alpha, float radius)
{
//GpuTimer timer;
//timer.Start();
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
hipLaunchKernelGGL(( UpdateBoundaryKernel), dim3(gridDim),dim3(blockDim), 0, 0, imgBuffer,nHeight,nWidth,labels,d_centers,nClusters,alpha,radius);
//timer.Stop();
//std::cout<<"update UpdateBoundary "<<timer.Elapsed()<<std::endl;
}
void InitConstClusterCenters(float4* imgBuffer, int nHeight, int nWidth,SLICClusterCenter* d_centers_in)
{
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
hipLaunchKernelGGL(( InitClustersKernel), dim3(gridDim),dim3(blockDim), 0, 0, imgBuffer,nHeight,nWidth,d_centers_in);
}
void UpdateClusters(float4* imgBuffer, int nHeight, int nWidth,int* labels, SLICClusterCenter* d_centers_in,
thrust::device_ptr<int>& outKeyPtr,
thrust::device_ptr<SLICClusterCenter>& outValuePtr,
SLICClusterCenter* d_centers, int& nClusters,int tNClusters)
{
GpuTimer timer;
timer.Start();
int size = nHeight*nWidth;
typedef thrust::device_ptr<int> keyPtr;
typedef thrust::device_ptr<SLICClusterCenter> valuePtr;
valuePtr centersPtr(d_centers);
valuePtr d_ptrV(d_centers_in);
keyPtr d_ptrK(labels);
//std::cout<<"keys in before sort\n";
//thrust::copy(d_ptrK, d_ptrK+size, std::ostream_iterator<int>(std::cout, "\n"));
//std::cout<<"centers in before sort\n";
//thrust::copy(d_ptrV, d_ptrV+size , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
//std::cout<<"sort\n";
thrust::sort_by_key(d_ptrK,d_ptrK+size,d_ptrV);
timer.Stop();
std::cout<<"sort by key "<<timer.Elapsed()<<std::endl;
//std::cout<<"reduce\n";
thrust::pair<keyPtr, valuePtr> new_end;
//std::cout<<"keys in after sort\n";
//thrust::copy(d_ptrK, d_ptrK+size, std::ostream_iterator<int>(std::cout, "\n"));
timer.Start();
//std::cout<<"centers in after sort\n";
//thrust::copy(d_ptrV, d_ptrV+size , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
new_end = thrust::reduce_by_key(d_ptrK,d_ptrK+size,d_ptrV,outKeyPtr,outValuePtr);
//std::cout<<"keys out\n";
//thrust::copy(outKeyPtr, new_end.first , std::ostream_iterator<int>(std::cout, "\n"));
timer.Stop();
std::cout<<"reduce by key "<<timer.Elapsed()<<std::endl;
nClusters = new_end.first - outKeyPtr;
timer.Start();
SLICClusterCenter* d_centersOut = thrust::raw_pointer_cast(outValuePtr);
int * d_keysOut = thrust::raw_pointer_cast(outKeyPtr);;
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
hipLaunchKernelGGL(( UpdateClustersKernel), dim3(gridDim),dim3(blockDim), 0, 0, nHeight, nWidth, d_keysOut,d_centersOut,d_centers, nClusters, tNClusters);
//std::cout<<"update\n";
/*std::cout << "Number of results are: \n" << new_end.first - outKeyVector.begin() << std::endl;*/
//thrust::device_ptr<SLICClusterCenter> centersoutPtr(d_centers);
//std::cout<<"out centers\n";
//thrust::copy(centersoutPtr, centersoutPtr+nClusters , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
timer.Stop();
std::cout<<"update cluster kernel"<<timer.Elapsed()<<std::endl;
} | 6efd80442c340701bc6216eb4eb681baff0fab79.cu | #include "CudaSuperpixel.h"
#include "cub/cub.cuh"
#include <iostream>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
#include <thrust/device_vector.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include "GpuTimer.h"
texture<float4> ImageTexture;
__global__ void InitClusterCentersKernel( float4* floatBuffer, int* labels, int nWidth, int nHeight,int step, int nSegs, SLICClusterCenter* vSLICCenterList )
{
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nSegs)
return;
int offsetBlock = (blockIdx.x *nWidth + threadIdx.x )* step;
float2 avXY;
avXY.x=threadIdx.x*step + (float)step/2.0;
avXY.y=blockIdx.x*step + (float)step/2.0;
float4 tmp;
tmp.x = 0;
tmp.y =0;
tmp.z = 0;
for(int i=0; i<step; i++)
{
for(int j=0; j<step; j++)
{
if ((threadIdx.x )* step + i >= nWidth ||
(blockIdx.x)*step + j>= nHeight)
continue;
int idx = offsetBlock + i + j*nWidth;
float4 pixel = tex1Dfetch(ImageTexture,idx);
tmp.x += pixel.x;
tmp.y += pixel.y;
tmp.z += pixel.z;
labels[idx] = clusterIdx;
}
}
double sz = step * step;
tmp.x = tmp.x / sz;
tmp.y = tmp.y /sz;
tmp.z = tmp.z/sz;
vSLICCenterList[clusterIdx].rgb= tmp;
vSLICCenterList[clusterIdx].xy=avXY;
vSLICCenterList[clusterIdx].nPoints= (int)sz;
}
__global__ void UpdateClustersKernel(int nHeight, int nWidth, int* keys,SLICClusterCenter* d_inCenters,SLICClusterCenter* d_outCenters, int nClusters,int tNClusters )
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters)
return;
d_outCenters[keys[clusterIdx]] = d_inCenters[clusterIdx] * (1.0/d_inCenters[clusterIdx].nPoints);
}
__global__ void UpdateClusterCenterKernel(int heigth,int width, int step, int* d_labels, SLICClusterCenter* d_inCenters,int nClusters)
{
const int size = 5*5;
__shared__ int sLabels[size];
__shared__ float4 sPixels[size];
__shared__ float4 Rgb[size];
__shared__ float2 XY[size];
__shared__ int flag[size];
int clusterIdx = blockIdx.x;
int cx = d_inCenters[clusterIdx].xy.x;
int cy = d_inCenters[clusterIdx].xy.y;
}
__global__ void UpdateClusterCenterKernel(float4* imgBuffer, int height, int width, int step,int * d_labels, SLICClusterCenter* d_inCenters,int nClusters)
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters)
return;
int k = d_inCenters[clusterIdx].xy.x;
int j = d_inCenters[clusterIdx].xy.y;
float4 crgb = make_float4(0,0,0,0);
float2 cxy = make_float2(0,0);
int n = 0;
// centred on the original centre point, update using a radius of step + 1
int radius = step;
for (int x = k- radius; x<= k+radius; x++)
{
for(int y = j - radius; y<= j+radius; y++)
{
if (x<0 || x>width-1 || y<0 || y> height-1)
continue;
int idx = x+y*width;
if (d_labels[idx] == clusterIdx)
{
float4 pixel = tex1Dfetch(ImageTexture,idx);
crgb.x += pixel.x;
crgb.y += pixel.y;
crgb.z += pixel.z;
cxy.x += x;
cxy.y += y;
n++;
}
}
}
d_inCenters[clusterIdx].rgb = make_float4(crgb.x/n,crgb.y/n,crgb.z/n,0);
d_inCenters[clusterIdx].xy = make_float2(cxy.x/n,cxy.y/n);
d_inCenters[clusterIdx].nPoints = n;
}
__global__ void InitClustersKernel(float4* imgBuffer, int nHeight, int nWidth, SLICClusterCenter* d_ceneters)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int idx = k+j*nWidth;
if (idx <nHeight*nWidth)
{
d_ceneters[idx].xy.x = k;
d_ceneters[idx].xy.y = j;
d_ceneters[idx].rgb = imgBuffer[idx];
d_ceneters[idx].nPoints = 1;
}
}
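/* SLIC-style distance between pixel (x, y) and cluster "label": an RGB colour distance plus a squared
   spatial distance, blended by alpha, with the spatial part divided by the search radius. */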
__device__ double distance(int x, int y, float4* imgBuffer,int width, int height, float alpha, float radius, int label, SLICClusterCenter* d_ceneters )
{
int idx = x + y*width;
float4 pixel = tex1Dfetch(ImageTexture,idx);
double dr = (pixel.x - d_ceneters[label].rgb.x);
double dg = (pixel.y - d_ceneters[label].rgb.y) ;
double db = (pixel.z - d_ceneters[label].rgb.z);
double d_rgb = sqrt(dr*dr + dg*dg + db*db);
double dx = (x*1.f - d_ceneters[label].xy.x);
double dy = (y*1.f - d_ceneters[label].xy.y);
double d_xy = (dx*dx + dy*dy);
return (1-alpha)*d_rgb + alpha*d_xy/(radius);
}
__global__ void UpdateBoundaryKernel(float4* imgBuffer, int nHeight, int nWidth,int* labels,SLICClusterCenter* d_ceneters, int nClusters,float alpha, float radius)
{
int dx4[4] = {-1, 0, 1, 0,};
int dy4[4] = { 0, -1, 0, 1};
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int mainindex = k+j*nWidth;
int np(0);
int nl[4];
for( int i = 0; i < 4; i++ )
{
int x = k + dx4[i];
int y = j + dy4[i];
if( (x >= 0 && x < nWidth) && (y >= 0 && y < nHeight) )
{
int index = y*nWidth + x;
if( labels[mainindex] != labels[index] )
{
nl[np++] = labels[index];
}
}
}
if( np > 1 )//change to 2 or 3 for thinner lines
{
double min = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,labels[mainindex],d_ceneters);
int idx = -1;
for(int i=0; i<np; i++)
{
double dis = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,nl[i],d_ceneters);
if (dis < min)
{
min = dis;
idx = i;
}
}
if (idx >=0)
labels[mainindex] = nl[idx];
}
}
__global__ void AvgClusterCenterKernel(SLICClusterCenter* d_cenetersIn, int nClusters)
{
// one thread per superpixel
int clusterIdx=blockIdx.x*blockDim.x+threadIdx.x;
if (clusterIdx >= nClusters || (d_cenetersIn[clusterIdx].nPoints ==0))
return;
int n = d_cenetersIn[clusterIdx].nPoints;
d_cenetersIn[clusterIdx].rgb.x /= n;
d_cenetersIn[clusterIdx].rgb.y /= n;
d_cenetersIn[clusterIdx].rgb.z /= n;
d_cenetersIn[clusterIdx].xy.x /= n;
d_cenetersIn[clusterIdx].xy.y /= n;
}
__global__ void UpdateBoundaryKernel(float4* imgBuffer, int nHeight, int nWidth,int* labels,SLICClusterCenter* d_ceneters, SLICClusterCenter* d_centersOut, int nClusters,float alpha, float radius)
{
int dx4[4] = {-1, 0, 1, 0,};
int dy4[4] = { 0, -1, 0, 1};
int k = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int mainindex = k+j*nWidth;
if (mainindex > nHeight*nWidth-1)
return;
int np(0);
int nl[4];
for( int i = 0; i < 4; i++ )
{
int x = k + dx4[i];
int y = j + dy4[i];
if( (x >= 0 && x < nWidth) && (y >= 0 && y < nHeight) )
{
int index = y*nWidth + x;
if( labels[mainindex] != labels[index] )
{
nl[np++] = labels[index];
}
}
}
if( np > 0 )//change to 2 or 3 for thinner lines
{
double min = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,labels[mainindex],d_ceneters);
int idx = -1;
for(int i=0; i<np; i++)
{
double dis = distance(k,j,imgBuffer,nWidth,nHeight,alpha,radius,nl[i],d_ceneters);
if (dis < min)
{
min = dis;
idx = i;
}
}
if (idx >=0)
labels[mainindex] = nl[idx];
}
/*d_centersOut[labels[mainindex]].nPoints++;
d_centersOut[labels[mainindex]].rgb.x +=imgBuffer[mainindex].x;
d_centersOut[labels[mainindex]].rgb.y +=imgBuffer[mainindex].y;
d_centersOut[labels[mainindex]].rgb.z +=imgBuffer[mainindex].z;
d_centersOut[labels[mainindex]].xy.x +=k;
d_centersOut[labels[mainindex]].xy.y +=j;*/
}
void UpdateClusterCenter(float4* imgBuffer, int height, int width, int step, int * d_labels, SLICClusterCenter* d_inCenters, int nClusters)
{
//GpuTimer timer;
//timer.Start();
dim3 blockDim(128);
dim3 gridDim((nClusters+127)/128);
UpdateClusterCenterKernel<<<gridDim,blockDim>>>(imgBuffer, height,width,step,d_labels, d_inCenters,nClusters);
//timer.Stop();
//std::cout<<"UpdateClusterCenter "<<timer.Elapsed()<<std::endl;
}
void InitClusterCenters(float4* d_rgbaBuffer, int* d_labels,int width, int height, int step, int &nSeg, SLICClusterCenter* d_centers)
{
/*GpuTimer timer;
timer.Start();*/
dim3 blockDim = (width+ step-1) / step ;
dim3 gridDim = (height + step -1) / step;
nSeg = blockDim.x * gridDim.x;
cudaBindTexture( NULL, ImageTexture,
d_rgbaBuffer,
sizeof(float4)*width*height );
InitClusterCentersKernel<<<gridDim,blockDim>>>(d_rgbaBuffer,d_labels,width,height,step,nSeg,d_centers);
/*timer.Stop();
std::cout<<" InitClusterCentersKernel"<<timer.Elapsed()<<std::endl;*/
}
void UpdateBoundary(float4* imgBuffer, int nHeight, int nWidth,int* labels, SLICClusterCenter* d_cenetersIn, SLICClusterCenter* d_centersOut, int nClusters,float alpha, float radius)
{
GpuTimer timer;
timer.Start();
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
UpdateBoundaryKernel<<<gridDim,blockDim>>>(imgBuffer,nHeight,nWidth,labels,d_cenetersIn, d_centersOut,nClusters,alpha,radius);
timer.Stop();
std::cout<<"update UpdateBoundary"<<timer.Elapsed()<<std::endl;
}
void AvgClusterCenter(SLICClusterCenter* d_cenetersIn, int nClusters)
{
GpuTimer timer;
timer.Start();
dim3 blockDim(128);
dim3 gridDim((nClusters+127)/128);
AvgClusterCenterKernel<<<gridDim,blockDim>>>(d_cenetersIn, nClusters);
timer.Stop();
std::cout<<"avg cluster center "<<timer.Elapsed()<<std::endl;
}
void UpdateBoundary(float4* imgBuffer, int nHeight, int nWidth,int * labels,SLICClusterCenter* d_centers, int nClusters,float alpha, float radius)
{
//GpuTimer timer;
//timer.Start();
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
UpdateBoundaryKernel<<<gridDim,blockDim>>>(imgBuffer,nHeight,nWidth,labels,d_centers,nClusters,alpha,radius);
//timer.Stop();
//std::cout<<"update UpdateBoundary "<<timer.Elapsed()<<std::endl;
}
void InitConstClusterCenters(float4* imgBuffer, int nHeight, int nWidth,SLICClusterCenter* d_centers_in)
{
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
InitClustersKernel<<<gridDim,blockDim>>>(imgBuffer,nHeight,nWidth,d_centers_in);
}
void UpdateClusters(float4* imgBuffer, int nHeight, int nWidth,int* labels, SLICClusterCenter* d_centers_in,
thrust::device_ptr<int>& outKeyPtr,
thrust::device_ptr<SLICClusterCenter>& outValuePtr,
SLICClusterCenter* d_centers, int& nClusters,int tNClusters)
{
GpuTimer timer;
timer.Start();
int size = nHeight*nWidth;
typedef thrust::device_ptr<int> keyPtr;
typedef thrust::device_ptr<SLICClusterCenter> valuePtr;
valuePtr centersPtr(d_centers);
valuePtr d_ptrV(d_centers_in);
keyPtr d_ptrK(labels);
//std::cout<<"keys in before sort\n";
//thrust::copy(d_ptrK, d_ptrK+size, std::ostream_iterator<int>(std::cout, "\n"));
//std::cout<<"centers in before sort\n";
//thrust::copy(d_ptrV, d_ptrV+size , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
//std::cout<<"sort\n";
thrust::sort_by_key(d_ptrK,d_ptrK+size,d_ptrV);
timer.Stop();
std::cout<<"sort by key "<<timer.Elapsed()<<std::endl;
//std::cout<<"reduce\n";
thrust::pair<keyPtr, valuePtr> new_end;
//std::cout<<"keys in after sort\n";
//thrust::copy(d_ptrK, d_ptrK+size, std::ostream_iterator<int>(std::cout, "\n"));
timer.Start();
//std::cout<<"centers in after sort\n";
//thrust::copy(d_ptrV, d_ptrV+size , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
new_end = thrust::reduce_by_key(d_ptrK,d_ptrK+size,d_ptrV,outKeyPtr,outValuePtr);
//std::cout<<"keys out\n";
//thrust::copy(outKeyPtr, new_end.first , std::ostream_iterator<int>(std::cout, "\n"));
timer.Stop();
std::cout<<"reduce by key "<<timer.Elapsed()<<std::endl;
nClusters = new_end.first - outKeyPtr;
timer.Start();
SLICClusterCenter* d_centersOut = thrust::raw_pointer_cast(outValuePtr);
int * d_keysOut = thrust::raw_pointer_cast(outKeyPtr);;
dim3 blockDim(16,16);
dim3 gridDim((nWidth+15)/16,(nHeight+15)/16);
UpdateClustersKernel<<<gridDim,blockDim>>>(nHeight, nWidth, d_keysOut,d_centersOut,d_centers, nClusters, tNClusters);
//std::cout<<"update\n";
/*std::cout << "Number of results are: \n" << new_end.first - outKeyVector.begin() << std::endl;*/
//thrust::device_ptr<SLICClusterCenter> centersoutPtr(d_centers);
//std::cout<<"out centers\n";
//thrust::copy(centersoutPtr, centersoutPtr+nClusters , std::ostream_iterator<SLICClusterCenter>(std::cout, "\n"));
timer.Stop();
std::cout<<"update cluster kernel"<<timer.Elapsed()<<std::endl;
} |
2f919b3bf02af0359ad2cd7757cc866dd3f90b61.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void j2d25pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
//Determining the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x);
int i = max(i0,2) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y);
int j = max(j0,2) + 4*(int)(threadIdx.y);
double (*in)[8196] = (double (*)[8196]) l_in;
double (*out)[8196] = (double (*)[8196]) l_out;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
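// note: the begin/end "stencil1" pragmas mark this region for an external source-to-source unroller (4x in j, 1x in i);
// presumably that tool expands the single statement below into the four rows j..j+3 that the index math reserves per thread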
#pragma begin stencil1 unroll j=4,i=1
out[j][i] = 0.1*(in[j-2][i-2] + in[j-2][i+2] + in[j+2][i-2] + in[j+2][i+2]) +
0.2*(in[j-2][i-1] + in[j-2][i+1] + in[j+2][i-1] + in[j+2][i+1]) +
0.3*(in[j-2][i] + in[j+2][i]) +
1.1*(in[j-1][i-2] + in[j-1][i+2] + in[j+1][i-2] + in[j+1][i+2]) +
1.2*(in[j-1][i-1] + in[j-1][i+1] + in[j+1][i-1] + in[j+1][i+1]) +
1.3*(in[j-1][i] + in[j+1][i]) +
2.1*(in[j][i-2] + in[j][i+2]) +
2.2*(in[j][i-1] + in[j][i+1]) +
2.3*in[j][i];
#pragma end stencil1
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
hipMalloc (&in, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for in\n");
hipMemcpy (in, h_in, sizeof(double)*N*N, hipMemcpyHostToDevice);
double *out;
hipMalloc (&out, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
hipLaunchKernelGGL(( j2d25pt), dim3(gridconfig), dim3(blockconfig), 0, 0, in, out, N);
hipMemcpy (h_out, out, sizeof(double)*N*N, hipMemcpyDeviceToHost);
hipFree (in);
hipFree (out);
}
| 2f919b3bf02af0359ad2cd7757cc866dd3f90b61.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void j2d25pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
//Determining the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x);
int i = max(i0,2) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y);
int j = max(j0,2) + 4*(int)(threadIdx.y);
double (*in)[8196] = (double (*)[8196]) l_in;
double (*out)[8196] = (double (*)[8196]) l_out;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
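// note: the begin/end "stencil1" pragmas mark this region for an external source-to-source unroller (4x in j, 1x in i);
// presumably that tool expands the single statement below into the four rows j..j+3 that the index math reserves per thread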
#pragma begin stencil1 unroll j=4,i=1
out[j][i] = 0.1*(in[j-2][i-2] + in[j-2][i+2] + in[j+2][i-2] + in[j+2][i+2]) +
0.2*(in[j-2][i-1] + in[j-2][i+1] + in[j+2][i-1] + in[j+2][i+1]) +
0.3*(in[j-2][i] + in[j+2][i]) +
1.1*(in[j-1][i-2] + in[j-1][i+2] + in[j+1][i-2] + in[j+1][i+2]) +
1.2*(in[j-1][i-1] + in[j-1][i+1] + in[j+1][i-1] + in[j+1][i+1]) +
1.3*(in[j-1][i] + in[j+1][i]) +
2.1*(in[j][i-2] + in[j][i+2]) +
2.2*(in[j][i-1] + in[j][i+1]) +
2.3*in[j][i];
#pragma end stencil1
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
j2d25pt<<<gridconfig, blockconfig>>> (in, out, N);
cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
cudaFree (in);
cudaFree (out);
}
|
beb8d98f703a704fbfae591113cfde8b888fac42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "cuda_binomial.cuh"
#include "../option_enum.h"
#include <iostream>
using namespace std;
__global__
void calcEuropeanOption(int timeSteps,
double startPrice,
double strikePrice,
double riskFree,
double delta,
double u,
double p_u,
double * cache,
int callPutModifier) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > timeSteps) return;
int colDim = timeSteps + 1;
cache[timeSteps * colDim + i] = max(callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice), 0.0);
timeSteps--;
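// backward induction: thread i owns lattice node i; at step t only nodes 0..t exist, so the thread folds values back one step per iteration and stops once t < i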
while (timeSteps >= i) {
__syncthreads();
cache[timeSteps * colDim + i] = (p_u * cache[(timeSteps + 1) * colDim + i + 1] +
(1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta);
timeSteps--;
// new gpu doesn't syncthreads properly?? need to investigate
for (int j = 0; j < 100; ++j) {
cache[1] += 0.0001;
}
}
}
__global__
void calcAmericanOption(int timeSteps,
double startPrice,
double strikePrice,
double riskFree,
double delta,
double u,
double p_u,
double * cache,
int callPutModifier) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > timeSteps) return;
int colDim = timeSteps + 1;
cache[timeSteps * colDim + i] = max(callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice), 0.0);
timeSteps--;
while (timeSteps >= i) {
__syncthreads();
cache[timeSteps * colDim + i] = max((p_u * cache[(timeSteps + 1) * colDim + i + 1] +
(1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta),
callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice));
timeSteps--;
}
}
LatticeBinomialCuda::LatticeBinomialCuda(int timeSteps) : AbstractValuation("LatticeCuda"), timeSteps{timeSteps} {}
LatticeBinomialCuda::~LatticeBinomialCuda() {}
double LatticeBinomialCuda::calcPrice(Option & opt) {
double delta = opt.timeToExpiry / timeSteps;
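// CRR-style lattice parameters: u is the per-step up factor (down factor d = 1/u) and p_u the risk-neutral probability of an up move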
double u = exp(opt.vol * sqrt(delta));
double p_u = (exp(opt.riskFree * delta) - 1/u) / (u - 1/u);
int callPutModifier = opt.otype == OptionType::Call ? 1 : -1;
int N = timeSteps + 1;
double * d_cache;
hipMalloc(&d_cache, N * N * sizeof(double));
if (opt.ostyle == OptionStyle::European) {
hipLaunchKernelGGL(( calcEuropeanOption), dim3((timeSteps + 255)/256), dim3(256), 0, 0, timeSteps, opt.startPrice, opt.strikePrice,
opt.riskFree, delta, u, p_u, d_cache, callPutModifier);
} else if (opt.ostyle == OptionStyle::American) {
hipLaunchKernelGGL(( calcAmericanOption), dim3((timeSteps + 255)/256), dim3(256), 0, 0, timeSteps, opt.startPrice, opt.strikePrice,
opt.riskFree, delta, u, p_u, d_cache, callPutModifier);
}
double finalPrice;
hipMemcpy(&finalPrice, d_cache, sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_cache);
return finalPrice;
} | beb8d98f703a704fbfae591113cfde8b888fac42.cu | #include <algorithm>
#include "cuda_binomial.cuh"
#include "../option_enum.h"
#include <iostream>
using namespace std;
__global__
void calcEuropeanOption(int timeSteps,
double startPrice,
double strikePrice,
double riskFree,
double delta,
double u,
double p_u,
double * cache,
int callPutModifier) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > timeSteps) return;
int colDim = timeSteps + 1;
cache[timeSteps * colDim + i] = max(callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice), 0.0);
timeSteps--;
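// backward induction: thread i owns lattice node i; at step t only nodes 0..t exist, so the thread folds values back one step per iteration and stops once t < i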
while (timeSteps >= i) {
__syncthreads();
cache[timeSteps * colDim + i] = (p_u * cache[(timeSteps + 1) * colDim + i + 1] +
(1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta);
timeSteps--;
// new gpu doesn't syncthreads properly?? need to investigate
for (int j = 0; j < 100; ++j) {
cache[1] += 0.0001;
}
}
}
__global__
void calcAmericanOption(int timeSteps,
double startPrice,
double strikePrice,
double riskFree,
double delta,
double u,
double p_u,
double * cache,
int callPutModifier) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i > timeSteps) return;
int colDim = timeSteps + 1;
cache[timeSteps * colDim + i] = max(callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice), 0.0);
timeSteps--;
while (timeSteps >= i) {
__syncthreads();
cache[timeSteps * colDim + i] = max((p_u * cache[(timeSteps + 1) * colDim + i + 1] +
(1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta),
callPutModifier * (startPrice * pow(u, 2 * i - timeSteps) - strikePrice));
timeSteps--;
}
}
LatticeBinomialCuda::LatticeBinomialCuda(int timeSteps) : AbstractValuation("LatticeCuda"), timeSteps{timeSteps} {}
LatticeBinomialCuda::~LatticeBinomialCuda() {}
double LatticeBinomialCuda::calcPrice(Option & opt) {
double delta = opt.timeToExpiry / timeSteps;
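// CRR-style lattice parameters: u is the per-step up factor (down factor d = 1/u) and p_u the risk-neutral probability of an up move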
double u = exp(opt.vol * sqrt(delta));
double p_u = (exp(opt.riskFree * delta) - 1/u) / (u - 1/u);
int callPutModifier = opt.otype == OptionType::Call ? 1 : -1;
int N = timeSteps + 1;
double * d_cache;
cudaMalloc(&d_cache, N * N * sizeof(double));
if (opt.ostyle == OptionStyle::European) {
calcEuropeanOption<<<(timeSteps + 255)/256, 256>>>(timeSteps, opt.startPrice, opt.strikePrice,
opt.riskFree, delta, u, p_u, d_cache, callPutModifier);
} else if (opt.ostyle == OptionStyle::American) {
calcAmericanOption<<<(timeSteps + 255)/256, 256>>>(timeSteps, opt.startPrice, opt.strikePrice,
opt.riskFree, delta, u, p_u, d_cache, callPutModifier);
}
double finalPrice;
cudaMemcpy(&finalPrice, d_cache, sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_cache);
return finalPrice;
} |
47ade4a5f1b812f19839d9594f4bb7883f6f5e1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void findRadixOffsets(uint2* keys, uint* counters, uint* blockOffsets, uint startbit, uint numElements, uint totalBlocks)
{
__shared__ uint sStartPointers[16];
extern __shared__ uint sRadix1[];
uint groupId = blockIdx.x;
uint localId = threadIdx.x;
uint groupSize = blockDim.x;
uint2 radix2;
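// each thread loads two adjacent keys and keeps only their 4-bit digit starting at startbit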
radix2 = keys[threadIdx.x + (blockIdx.x * blockDim.x)];
sRadix1[2 * localId] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * localId + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(localId < 16)
{
sStartPointers[localId] = 0;
}
__syncthreads();
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId]] = localId;
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1])
{
sStartPointers[sRadix1[localId + groupSize]] = localId + groupSize;
}
__syncthreads();
if(localId < 16)
{
blockOffsets[groupId*16 + localId] = sStartPointers[localId];
}
__syncthreads();
// Compute the sizes of each block.
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId - 1]] =
localId - sStartPointers[sRadix1[localId - 1]];
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1] )
{
sStartPointers[sRadix1[localId + groupSize - 1]] =
localId + groupSize - sStartPointers[sRadix1[localId +
groupSize - 1]];
}
if(localId == groupSize - 1)
{
sStartPointers[sRadix1[2 * groupSize - 1]] =
2 * groupSize - sStartPointers[sRadix1[2 * groupSize - 1]];
}
__syncthreads();
if(localId < 16)
{
counters[localId * totalBlocks + groupId] = sStartPointers[localId];
}
} | 47ade4a5f1b812f19839d9594f4bb7883f6f5e1c.cu | #include "includes.h"
__global__ void findRadixOffsets(uint2* keys, uint* counters, uint* blockOffsets, uint startbit, uint numElements, uint totalBlocks)
{
__shared__ uint sStartPointers[16];
extern __shared__ uint sRadix1[];
uint groupId = blockIdx.x;
uint localId = threadIdx.x;
uint groupSize = blockDim.x;
uint2 radix2;
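// each thread loads two adjacent keys and keeps only their 4-bit digit starting at startbit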
radix2 = keys[threadIdx.x + (blockIdx.x * blockDim.x)];
sRadix1[2 * localId] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * localId + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(localId < 16)
{
sStartPointers[localId] = 0;
}
__syncthreads();
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId]] = localId;
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1])
{
sStartPointers[sRadix1[localId + groupSize]] = localId + groupSize;
}
__syncthreads();
if(localId < 16)
{
blockOffsets[groupId*16 + localId] = sStartPointers[localId];
}
__syncthreads();
// Compute the sizes of each block.
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId - 1]] =
localId - sStartPointers[sRadix1[localId - 1]];
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1] )
{
sStartPointers[sRadix1[localId + groupSize - 1]] =
localId + groupSize - sStartPointers[sRadix1[localId +
groupSize - 1]];
}
if(localId == groupSize - 1)
{
sStartPointers[sRadix1[2 * groupSize - 1]] =
2 * groupSize - sStartPointers[sRadix1[2 * groupSize - 1]];
}
__syncthreads();
if(localId < 16)
{
counters[localId * totalBlocks + groupId] = sStartPointers[localId];
}
} |
584f7a1a3a08b984c8582511e648f70906555bea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_tgammaf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vec_tgammaf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_tgammaf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_tgammaf), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 584f7a1a3a08b984c8582511e648f70906555bea.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_tgammaf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_tgammaf<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_tgammaf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_tgammaf<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e7465bafb5748cff926dc55a4f511ff523943e60.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "blur.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
int *G = NULL;
hipMalloc(&G, XSIZE*YSIZE);
int *R = NULL;
hipMalloc(&R, XSIZE*YSIZE);
int *RB = NULL;
hipMalloc(&RB, XSIZE*YSIZE);
int *RG = NULL;
hipMalloc(&RG, XSIZE*YSIZE);
int *RR = NULL;
hipMalloc(&RR, XSIZE*YSIZE);
int *K = NULL;
hipMalloc(&K, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int krows = 1;
int kcols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
blur), dim3(gridBlock),dim3(threadBlock), 0, 0, B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
blur), dim3(gridBlock),dim3(threadBlock), 0, 0, B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
blur), dim3(gridBlock),dim3(threadBlock), 0, 0, B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e7465bafb5748cff926dc55a4f511ff523943e60.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "blur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
int *G = NULL;
cudaMalloc(&G, XSIZE*YSIZE);
int *R = NULL;
cudaMalloc(&R, XSIZE*YSIZE);
int *RB = NULL;
cudaMalloc(&RB, XSIZE*YSIZE);
int *RG = NULL;
cudaMalloc(&RG, XSIZE*YSIZE);
int *RR = NULL;
cudaMalloc(&RR, XSIZE*YSIZE);
int *K = NULL;
cudaMalloc(&K, XSIZE*YSIZE);
int rows = XSIZE;
int cols = YSIZE;
int krows = 1;
int kcols = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
blur<<<gridBlock,threadBlock>>>(B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
blur<<<gridBlock,threadBlock>>>(B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
blur<<<gridBlock,threadBlock>>>(B,G,R,RB,RG,RR,K,rows,cols,krows,kcols);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |