hip_filename stringlengths 5–84 | hip_content stringlengths 79–9.69M | cuda_filename stringlengths 4–83 | cuda_content stringlengths 19–9.69M |
---|---|---|---|
37d7a5a594a2c29834fde36b69be874e259a1ce9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#include <cmath>
#include "support.h"
#include "support.cu"
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
// __constant__ struct kValues kVals_c[3072];
__global__ void SampleAll(int M, float* rPhi, float* iPhi, float* __restrict__ phiMag) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < M) {
float real = rPhi[tid];
float imag = iPhi[tid];
phiMag[tid] = real*real + imag*imag;
}
}
// inline
extern "C" void ComputePhiMag_GPU(int numK, float* phiR, float* phiI,
float* __restrict__ phiMag) {
int blk_num;
const unsigned int blksize = 1024;
blk_num = (numK - 1)/blksize + 1;
float *A_d, *B_d, *C_d;
Timer timer;
// hipError_t cuda_ret;
startTime(&timer);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
hipMalloc((void**) &A_d, sizeof(float)*numK );
hipMalloc((void**) &B_d, sizeof(float)*numK );
hipMalloc((void**) &C_d, sizeof(float)*numK );
hipDeviceSynchronize();
// Copy host variables to device
printf("Copying data from host to device...\n"); fflush(stdout);
hipMemcpy(A_d, phiR, sizeof(float)*numK, hipMemcpyHostToDevice );
hipMemcpy(B_d, phiI, sizeof(float)*numK, hipMemcpyHostToDevice );
hipDeviceSynchronize();
stopTime(&timer); printf("Copying data time: %f s\n", elapsedTime(timer));
//int indexK = 0; // indexK is m, numK is number of samples, 2048 for 64
// for (indexK = 0; indexK < numK; indexK++) {
// float real = phiR[indexK];
// float imag = phiI[indexK];
// phiMag[indexK] = real*real + imag*imag;
// }
// at each sample point m or indexK
printf("Launching kernel...\n"); fflush(stdout);
startTime(&timer);
hipLaunchKernelGGL(( SampleAll) , dim3(blk_num), dim3(blksize), 0, 0, numK, A_d, B_d, C_d);
hipDeviceSynchronize();
stopTime(&timer); printf("ComputePhiMag_GPU: %f s\n", elapsedTime(timer));
// Copy device variables to host
hipMemcpy(phiMag, C_d, sizeof(float)*numK, hipMemcpyDeviceToHost );
hipDeviceSynchronize();
// Free memory
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
//stopTime(&timer); printf("ComputePhiMag_GPU: %f s\n", elapsedTime(timer));
}
// __global__ void cmpQ(int numK, int numX,
// float* gx, float* gy, float* gz,
// float *__restrict__ Qr, float *__restrict__ Qi) {
// // __shared__ float ds_kVals[sizeof(kVals)];
// float expArg;
// float cosArg;
// float sinArg;
// // find the index of voxel assigned to this thread
// //threadIdx.x + blockDim.x * blockIdx.x;
// int n = blockIdx.x * blockDim.x + threadIdx.x;
// // register allocate voxel inputs and outputs
// if(n < numX) {
// float x = gx[n];
// float y = gy[n];
// float z = gz[n];
// float Qracc = 0.0f;
// float Qiacc = 0.0f;
// // m is indexK
// for(int m = 0; m < numK; m++) {
// // better to store sample data kVals[] in constant memory
// expArg = PIx2 * (kVals_c[m].Kx * x +
// kVals_c[m].Ky * y +
// kVals_c[m].Kz * z);
// cosArg = cosf(expArg);
// sinArg = sinf(expArg);
// float phi = kVals_c[m].PhiMag;
// Qracc += phi * cosArg;
// Qiacc += phi * sinArg;
// }
// __syncthreads();
// Qr[n] = Qracc;
// Qi[n] = Qiacc;
// }
// }
__global__ void cmpQ(int numK, int numX, struct kValues *kVs,
float* gx, float* gy, float* gz,
float *__restrict__ Qr, float *__restrict__ Qi) {
__shared__ struct kValues kVals[3072];
float expArg;
float cosArg;
float sinArg;
// find the index of voxel assigned to this thread
//threadIdx.x + blockDim.x * blockIdx.x;
int n = blockIdx.x * blockDim.x + threadIdx.x;
for (int ii = 0; threadIdx.x + ii*blockDim.x < numK; ii++) {
kVals[threadIdx.x + ii*blockDim.x] = kVs[threadIdx.x + ii*blockDim.x];
}
__syncthreads();
// register allocate voxel inputs and outputs
if(n < numX) {
float x = gx[n];
float y = gy[n];
float z = gz[n];
float Qracc = 0.0f;
float Qiacc = 0.0f;
// m is indexK
// if (n < numK) {
// printf("%d: kVs.Ky = %f ; kVals.Ky = %f \n",n,kVs[n].Ky, kVals[n].Ky);
// }
for(int m = 0; m < numK; m++) {
// better to store sample data kVals[] in constant memory
expArg = PIx2 * (kVs[m].Kx * x +
kVs[m].Ky * y +
kVs[m].Kz * z);
cosArg = cosf(expArg);
sinArg = sinf(expArg);
float phi = kVs[m].PhiMag;
Qracc += phi * cosArg;
Qiacc += phi * sinArg;
// if(n==2222) {
// printf(" %d: kVs.Kz = %f ; kVals.Kz = %f \n",m,kVs[m].Kz, kVals[m].Kz);
// }
}
__syncthreads();
Qr[n] = Qracc;
Qi[n] = Qiacc;
// if(n==2222) {
// printf(" %d: kVs.Ky = %f ; kVals.Ky = %f \n",n,kVs[n].Ky, kVals[n].Ky);
// printf("Qr[n] = %f, Qi[n] = %f, %d \n",Qr[n], Qi[n],n);
// }
}
// printf("*******************************************************\n");
// if (n < numK) {
// printf(" kVs.Ky = %f ; kVals.Ky = %f \n",kVs[n].Ky, kVals[n].Ky);
// }
}
extern "C" void ComputeQ_GPU(int numK, int numX, struct kValues *kVals,
float* x, float* y, float* z,
float *__restrict__ Qr, float *__restrict__ Qi) {
int blk_num;
const unsigned int blksize = 1024;
blk_num = (numX - 1)/blksize + 1;
float *x_d, *y_d, *z_d;
float *__restrict__ Qr_d;
float *__restrict__ Qi_d;
struct kValues *kVals_d;
Timer timer;
startTime(&timer);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
hipMalloc((void**) &x_d, sizeof(float)*numX );
hipMalloc((void**) &y_d, sizeof(float)*numX );
hipMalloc((void**) &z_d, sizeof(float)*numX );
hipMalloc((void**) &kVals_d, sizeof(struct kValues)*numK );
hipMalloc((void**) &Qr_d, sizeof(float)*numX );
hipMalloc((void**) &Qi_d, sizeof(float)*numX );
hipDeviceSynchronize();
// Copy host variables to device
printf("Copying data from host to device...\n"); fflush(stdout);
hipMemcpy(x_d, x, sizeof(float)*numX, hipMemcpyHostToDevice );
hipMemcpy(y_d, y, sizeof(float)*numX, hipMemcpyHostToDevice );
hipMemcpy(z_d, z, sizeof(float)*numX, hipMemcpyHostToDevice );
hipMemcpy(kVals_d, kVals, sizeof(struct kValues)*numK, hipMemcpyHostToDevice);
// hipMemcpyToSymbol(kVals_c, kVals, sizeof(struct kValues)*numK, hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer); printf("Copying data to device time: %f s\n", elapsedTime(timer));
// Launch a kernel
printf("Launching kernel...\n"); fflush(stdout);
startTime(&timer);
hipLaunchKernelGGL(( cmpQ) , dim3(blk_num), dim3(blksize), 0, 0, numK, numX, kVals_d, x_d, y_d, z_d, Qr_d, Qi_d);
//hipLaunchKernelGGL(( cmpQ) , dim3(blk_num), dim3(blksize), 0, 0, numK, numX, x_d, y_d, z_d, Qr_d, Qi_d);
hipDeviceSynchronize();
stopTime(&timer); printf("ComputeQ_GPU kernel time: %f s\n", elapsedTime(timer));
// Copy device variables to host
startTime(&timer);
hipMemcpy(Qr, Qr_d, sizeof(float)*numX, hipMemcpyDeviceToHost );
hipMemcpy(Qi, Qi_d, sizeof(float)*numX, hipMemcpyDeviceToHost );
hipDeviceSynchronize();
// Free memory
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
hipFree(kVals_d);
// hipFree(kVals_c);
hipFree(Qr_d);
hipFree(Qi_d);
stopTime(&timer); printf("Copying data back time: %f s\n", elapsedTime(timer));
}
| 37d7a5a594a2c29834fde36b69be874e259a1ce9.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <ctime>
#include <cstdlib>
#include <cmath>
#include "support.h"
#include "support.cu"
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
// __constant__ struct kValues kVals_c[3072];
__global__ void SampleAll(int M, float* rPhi, float* iPhi, float* __restrict__ phiMag) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < M) {
float real = rPhi[tid];
float imag = iPhi[tid];
phiMag[tid] = real*real + imag*imag;
}
}
// inline
extern "C" void ComputePhiMag_GPU(int numK, float* phiR, float* phiI,
float* __restrict__ phiMag) {
int blk_num;
const unsigned int blksize = 1024;
blk_num = (numK - 1)/blksize + 1;
float *A_d, *B_d, *C_d;
Timer timer;
// cudaError_t cuda_ret;
startTime(&timer);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
cudaMalloc((void**) &A_d, sizeof(float)*numK );
cudaMalloc((void**) &B_d, sizeof(float)*numK );
cudaMalloc((void**) &C_d, sizeof(float)*numK );
cudaDeviceSynchronize();
// Copy host variables to device
printf("Copying data from host to device...\n"); fflush(stdout);
cudaMemcpy(A_d, phiR, sizeof(float)*numK, cudaMemcpyHostToDevice );
cudaMemcpy(B_d, phiI, sizeof(float)*numK, cudaMemcpyHostToDevice );
cudaDeviceSynchronize();
stopTime(&timer); printf("Copying data time: %f s\n", elapsedTime(timer));
//int indexK = 0; // indexK is m, numK is number of samples, 2048 for 64
// for (indexK = 0; indexK < numK; indexK++) {
// float real = phiR[indexK];
// float imag = phiI[indexK];
// phiMag[indexK] = real*real + imag*imag;
// }
// at each sample point m or indexK
printf("Launching kernel...\n"); fflush(stdout);
startTime(&timer);
SampleAll <<<blk_num, blksize>>> (numK, A_d, B_d, C_d);
cudaDeviceSynchronize();
stopTime(&timer); printf("ComputePhiMag_GPU: %f s\n", elapsedTime(timer));
// Copy device variables to host
cudaMemcpy(phiMag, C_d, sizeof(float)*numK, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
// Free memory
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
//stopTime(&timer); printf("ComputePhiMag_GPU: %f s\n", elapsedTime(timer));
}
// __global__ void cmpQ(int numK, int numX,
// float* gx, float* gy, float* gz,
// float *__restrict__ Qr, float *__restrict__ Qi) {
// // __shared__ float ds_kVals[sizeof(kVals)];
// float expArg;
// float cosArg;
// float sinArg;
// // find the index of voxel assigned to this thread
// //threadIdx.x + blockDim.x * blockIdx.x;
// int n = blockIdx.x * blockDim.x + threadIdx.x;
// // register allocate voxel inputs and outputs
// if(n < numX) {
// float x = gx[n];
// float y = gy[n];
// float z = gz[n];
// float Qracc = 0.0f;
// float Qiacc = 0.0f;
// // m is indexK
// for(int m = 0; m < numK; m++) {
// // better to store sample data kVals[] in constant memory
// expArg = PIx2 * (kVals_c[m].Kx * x +
// kVals_c[m].Ky * y +
// kVals_c[m].Kz * z);
// cosArg = cosf(expArg);
// sinArg = sinf(expArg);
// float phi = kVals_c[m].PhiMag;
// Qracc += phi * cosArg;
// Qiacc += phi * sinArg;
// }
// __syncthreads();
// Qr[n] = Qracc;
// Qi[n] = Qiacc;
// }
// }
__global__ void cmpQ(int numK, int numX, struct kValues *kVs,
float* gx, float* gy, float* gz,
float *__restrict__ Qr, float *__restrict__ Qi) {
__shared__ struct kValues kVals[3072];
float expArg;
float cosArg;
float sinArg;
// find the index of voxel assigned to this thread
//threadIdx.x + blockDim.x * blockIdx.x;
int n = blockIdx.x * blockDim.x + threadIdx.x;
for (int ii = 0; threadIdx.x + ii*blockDim.x < numK; ii++) {
kVals[threadIdx.x + ii*blockDim.x] = kVs[threadIdx.x + ii*blockDim.x];
}
__syncthreads();
// register allocate voxel inputs and outputs
if(n < numX) {
float x = gx[n];
float y = gy[n];
float z = gz[n];
float Qracc = 0.0f;
float Qiacc = 0.0f;
// m is indexK
// if (n < numK) {
// printf("%d: kVs.Ky = %f ; kVals.Ky = %f \n",n,kVs[n].Ky, kVals[n].Ky);
// }
for(int m = 0; m < numK; m++) {
// better to store sample data kVals[] in constant memory
expArg = PIx2 * (kVs[m].Kx * x +
kVs[m].Ky * y +
kVs[m].Kz * z);
cosArg = cosf(expArg);
sinArg = sinf(expArg);
float phi = kVs[m].PhiMag;
Qracc += phi * cosArg;
Qiacc += phi * sinArg;
// if(n==2222) {
// printf(" %d: kVs.Kz = %f ; kVals.Kz = %f \n",m,kVs[m].Kz, kVals[m].Kz);
// }
}
__syncthreads();
Qr[n] = Qracc;
Qi[n] = Qiacc;
// if(n==2222) {
// printf(" %d: kVs.Ky = %f ; kVals.Ky = %f \n",n,kVs[n].Ky, kVals[n].Ky);
// printf("Qr[n] = %f, Qi[n] = %f, %d \n",Qr[n], Qi[n],n);
// }
}
// printf("*******************************************************\n");
// if (n < numK) {
// printf(" kVs.Ky = %f ; kVals.Ky = %f \n",kVs[n].Ky, kVals[n].Ky);
// }
}
extern "C" void ComputeQ_GPU(int numK, int numX, struct kValues *kVals,
float* x, float* y, float* z,
float *__restrict__ Qr, float *__restrict__ Qi) {
int blk_num;
const unsigned int blksize = 1024;
blk_num = (numX - 1)/blksize + 1;
float *x_d, *y_d, *z_d;
float *__restrict__ Qr_d;
float *__restrict__ Qi_d;
struct kValues *kVals_d;
Timer timer;
startTime(&timer);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables...\n"); fflush(stdout);
cudaMalloc((void**) &x_d, sizeof(float)*numX );
cudaMalloc((void**) &y_d, sizeof(float)*numX );
cudaMalloc((void**) &z_d, sizeof(float)*numX );
cudaMalloc((void**) &kVals_d, sizeof(struct kValues)*numK );
cudaMalloc((void**) &Qr_d, sizeof(float)*numX );
cudaMalloc((void**) &Qi_d, sizeof(float)*numX );
cudaDeviceSynchronize();
// Copy host variables to device
printf("Copying data from host to device...\n"); fflush(stdout);
cudaMemcpy(x_d, x, sizeof(float)*numX, cudaMemcpyHostToDevice );
cudaMemcpy(y_d, y, sizeof(float)*numX, cudaMemcpyHostToDevice );
cudaMemcpy(z_d, z, sizeof(float)*numX, cudaMemcpyHostToDevice );
cudaMemcpy(kVals_d, kVals, sizeof(struct kValues)*numK, cudaMemcpyHostToDevice);
// cudaMemcpyToSymbol(kVals_c, kVals, sizeof(struct kValues)*numK, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer); printf("Copying data to device time: %f s\n", elapsedTime(timer));
// Launch a kernel
printf("Launching kernel...\n"); fflush(stdout);
startTime(&timer);
cmpQ <<<blk_num, blksize>>> (numK, numX, kVals_d, x_d, y_d, z_d, Qr_d, Qi_d);
// cmpQ <<<blk_num, blksize>>> (numK, numX, x_d, y_d, z_d, Qr_d, Qi_d);
cudaDeviceSynchronize();
stopTime(&timer); printf("ComputeQ_GPU kernel time: %f s\n", elapsedTime(timer));
// Copy device variables to host
startTime(&timer);
cudaMemcpy(Qr, Qr_d, sizeof(float)*numX, cudaMemcpyDeviceToHost );
cudaMemcpy(Qi, Qi_d, sizeof(float)*numX, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
// Free memory
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cudaFree(kVals_d);
// cudaFree(kVals_c);
cudaFree(Qr_d);
cudaFree(Qi_d);
stopTime(&timer); printf("Copying data back time: %f s\n", elapsedTime(timer));
}
|
3f61a5f2257c717c1a831f80ab724699e7885d5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_copy_kernel;
int xdim0_multidim_copy_kernel_h = -1;
__constant__ int ydim0_multidim_copy_kernel;
int ydim0_multidim_copy_kernel_h = -1;
__constant__ int zdim0_multidim_copy_kernel;
int zdim0_multidim_copy_kernel_h = -1;
__constant__ int xdim1_multidim_copy_kernel;
int xdim1_multidim_copy_kernel_h = -1;
__constant__ int ydim1_multidim_copy_kernel;
int ydim1_multidim_copy_kernel_h = -1;
__constant__ int zdim1_multidim_copy_kernel;
int zdim1_multidim_copy_kernel_h = -1;
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
#define OPS_ACC_MD0(d,x,y,z) ((x)+(xdim0_multidim_copy_kernel*(y))+(xdim0_multidim_copy_kernel*ydim0_multidim_copy_kernel*(z))+(d)*xdim0_multidim_copy_kernel*ydim0_multidim_copy_kernel*zdim0_multidim_copy_kernel)
#define OPS_ACC_MD1(d,x,y,z) ((x)+(xdim1_multidim_copy_kernel*(y))+(xdim1_multidim_copy_kernel*ydim1_multidim_copy_kernel*(z))+(d)*xdim1_multidim_copy_kernel*ydim1_multidim_copy_kernel*zdim1_multidim_copy_kernel)
//user function
__device__
void multidim_copy_kernel_gpu(const double *src, double *dest){
dest[OPS_ACC_MD1(0,0,0,0)] = src[OPS_ACC_MD0(0,0,0,0)];
dest[OPS_ACC_MD1(1,0,0,0)] = src[OPS_ACC_MD0(1,0,0,0)];
dest[OPS_ACC_MD1(2,0,0,0)] = src[OPS_ACC_MD0(2,0,0,0)];
}
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
__global__ void ops_multidim_copy_kernel(
const double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1+ idx_y * 1* xdim0_multidim_copy_kernel + idx_z * 1 * xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel;
arg1 += idx_x * 1+ idx_y * 1* xdim1_multidim_copy_kernel + idx_z * 1 * xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
multidim_copy_kernel_gpu(arg0, arg1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"multidim_copy_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int zdim0 = args[0].dat->size[2];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int zdim1 = args[1].dat->size[2];
if (xdim0 != xdim0_multidim_copy_kernel_h || ydim0 != ydim0_multidim_copy_kernel_h || zdim0 != zdim0_multidim_copy_kernel_h || xdim1 != xdim1_multidim_copy_kernel_h || ydim1 != ydim1_multidim_copy_kernel_h || zdim1 != zdim1_multidim_copy_kernel_h) {
hipMemcpyToSymbol( xdim0_multidim_copy_kernel, &xdim0, sizeof(int) );
xdim0_multidim_copy_kernel_h = xdim0;
hipMemcpyToSymbol( ydim0_multidim_copy_kernel, &ydim0, sizeof(int) );
ydim0_multidim_copy_kernel_h = ydim0;
hipMemcpyToSymbol( zdim0_multidim_copy_kernel, &zdim0, sizeof(int) );
zdim0_multidim_copy_kernel_h = zdim0;
hipMemcpyToSymbol( xdim1_multidim_copy_kernel, &xdim1, sizeof(int) );
xdim1_multidim_copy_kernel_h = xdim1;
hipMemcpyToSymbol( ydim1_multidim_copy_kernel, &ydim1, sizeof(int) );
ydim1_multidim_copy_kernel_h = ydim1;
hipMemcpyToSymbol( zdim1_multidim_copy_kernel, &zdim1, sizeof(int) );
zdim1_multidim_copy_kernel_h = zdim1;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_multidim_copy_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_multidim_copy_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"multidim_copy_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 3f61a5f2257c717c1a831f80ab724699e7885d5e.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_copy_kernel;
int xdim0_multidim_copy_kernel_h = -1;
__constant__ int ydim0_multidim_copy_kernel;
int ydim0_multidim_copy_kernel_h = -1;
__constant__ int zdim0_multidim_copy_kernel;
int zdim0_multidim_copy_kernel_h = -1;
__constant__ int xdim1_multidim_copy_kernel;
int xdim1_multidim_copy_kernel_h = -1;
__constant__ int ydim1_multidim_copy_kernel;
int ydim1_multidim_copy_kernel_h = -1;
__constant__ int zdim1_multidim_copy_kernel;
int zdim1_multidim_copy_kernel_h = -1;
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
#define OPS_ACC_MD0(d,x,y,z) ((x)+(xdim0_multidim_copy_kernel*(y))+(xdim0_multidim_copy_kernel*ydim0_multidim_copy_kernel*(z))+(d)*xdim0_multidim_copy_kernel*ydim0_multidim_copy_kernel*zdim0_multidim_copy_kernel)
#define OPS_ACC_MD1(d,x,y,z) ((x)+(xdim1_multidim_copy_kernel*(y))+(xdim1_multidim_copy_kernel*ydim1_multidim_copy_kernel*(z))+(d)*xdim1_multidim_copy_kernel*ydim1_multidim_copy_kernel*zdim1_multidim_copy_kernel)
//user function
__device__
void multidim_copy_kernel_gpu(const double *src, double *dest){
dest[OPS_ACC_MD1(0,0,0,0)] = src[OPS_ACC_MD0(0,0,0,0)];
dest[OPS_ACC_MD1(1,0,0,0)] = src[OPS_ACC_MD0(1,0,0,0)];
dest[OPS_ACC_MD1(2,0,0,0)] = src[OPS_ACC_MD0(2,0,0,0)];
}
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
__global__ void ops_multidim_copy_kernel(
const double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1+ idx_y * 1* xdim0_multidim_copy_kernel + idx_z * 1 * xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel;
arg1 += idx_x * 1+ idx_y * 1* xdim1_multidim_copy_kernel + idx_z * 1 * xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
multidim_copy_kernel_gpu(arg0, arg1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,1)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1,"multidim_copy_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int zdim0 = args[0].dat->size[2];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int zdim1 = args[1].dat->size[2];
if (xdim0 != xdim0_multidim_copy_kernel_h || ydim0 != ydim0_multidim_copy_kernel_h || zdim0 != zdim0_multidim_copy_kernel_h || xdim1 != xdim1_multidim_copy_kernel_h || ydim1 != ydim1_multidim_copy_kernel_h || zdim1 != zdim1_multidim_copy_kernel_h) {
cudaMemcpyToSymbol( xdim0_multidim_copy_kernel, &xdim0, sizeof(int) );
xdim0_multidim_copy_kernel_h = xdim0;
cudaMemcpyToSymbol( ydim0_multidim_copy_kernel, &ydim0, sizeof(int) );
ydim0_multidim_copy_kernel_h = ydim0;
cudaMemcpyToSymbol( zdim0_multidim_copy_kernel, &zdim0, sizeof(int) );
zdim0_multidim_copy_kernel_h = zdim0;
cudaMemcpyToSymbol( xdim1_multidim_copy_kernel, &xdim1, sizeof(int) );
xdim1_multidim_copy_kernel_h = xdim1;
cudaMemcpyToSymbol( ydim1_multidim_copy_kernel, &ydim1, sizeof(int) );
ydim1_multidim_copy_kernel_h = ydim1;
cudaMemcpyToSymbol( zdim1_multidim_copy_kernel, &zdim1, sizeof(int) );
zdim1_multidim_copy_kernel_h = zdim1;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_multidim_copy_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[1].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[1].mpi_time += t2-t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_multidim_copy_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1,"multidim_copy_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
b5877866a53a9ed4a2fb394012ffcadc57779816.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "SOR_13PT_CROSS_SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(hipMalloc((void **) &dev_arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int)));
cudaCheckReturn(hipMalloc((void **) &dev_arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int)));
if (padd <= 4110) {
cudaCheckReturn(hipMemcpy(dev_arr1, arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), hipMemcpyHostToDevice));
cudaCheckReturn(hipMemcpy(dev_arr2, arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), hipMemcpyHostToDevice));
}
struct timeval t1, t2;
gettimeofday(&t1, NULL);
for (int c0 = 0; c0 < trial; c0 += 2) {
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
hipLaunchKernelGGL(( kernel0) , dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
hipDeviceSynchronize();
{
dim3 k1_dimBlock(16, 32);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
hipLaunchKernelGGL(( kernel1) , dim3(k1_dimGrid), dim3(k1_dimBlock), 0, 0, dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("execution time: %lf\n", t3);
}
if (padd <= 4110) {
cudaCheckReturn(hipMemcpy(arr1, dev_arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
cudaCheckReturn(hipMemcpy(arr2, dev_arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_arr1));
cudaCheckReturn(hipFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int padd = 6;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial);
return 0;
}
| b5877866a53a9ed4a2fb394012ffcadc57779816.cu | #include <assert.h>
#include <stdio.h>
#include "SOR_13PT_CROSS_SOR_kernel.hu"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#include<sys/stat.h>
#include<fcntl.h>
#include<string.h>
#include<errno.h>
const int n1 = 4096, n2 = 4096;
const int nn1 = 4108, nn2 = 4108;
void SOR(int len1, int len2, int arr1[nn1][nn2], int arr2[nn1][nn2], int padd, int trial){
struct timeval tbegin, tend;
gettimeofday(&tbegin, NULL);
if (trial >= 1 && len1 >= padd + 1 && len2 >= padd + 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
int *dev_arr1;
int *dev_arr2;
cudaCheckReturn(cudaMalloc((void **) &dev_arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int)));
cudaCheckReturn(cudaMalloc((void **) &dev_arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int)));
if (padd <= 4110) {
cudaCheckReturn(cudaMemcpy(dev_arr1, arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
cudaCheckReturn(cudaMemcpy(dev_arr2, arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), cudaMemcpyHostToDevice));
}
struct timeval t1, t2;
gettimeofday(&t1, NULL);
for (int c0 = 0; c0 < trial; c0 += 2) {
{
dim3 k0_dimBlock(16, 32);
dim3 k0_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
kernel0 <<<k0_dimGrid, k0_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
cudaDeviceSynchronize();
{
dim3 k1_dimBlock(16, 32);
dim3 k1_dimGrid(len2 + 30 >= ((len2 + 31) % 8192) + padd ? 256 : (len2 + 31) / 32 - 256 * ((len2 + 31) / 8192), len1 + 30 >= ((len1 + 31) % 8192) + padd ? 256 : (len1 + 31) / 32 - 256 * ((len1 + 31) / 8192));
kernel1 <<<k1_dimGrid, k1_dimBlock>>> (dev_arr1, dev_arr2, trial, padd, len1, len2, c0);
cudaCheckKernel();
}
cudaDeviceSynchronize();
gettimeofday(&t2, NULL);
double t3 = (double)(t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec) / 1000000.0;
printf("execution time: %lf\n", t3);
}
if (padd <= 4110) {
cudaCheckReturn(cudaMemcpy(arr1, dev_arr1, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaMemcpy(arr2, dev_arr2, (padd >= 4108 ? len1 : len1 + 3) * (4108) * sizeof(int), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_arr1));
cudaCheckReturn(cudaFree(dev_arr2));
}
gettimeofday(&tend, NULL);
double tt = (double)(tend.tv_sec - tbegin.tv_sec) + (double)(tend.tv_usec - tbegin.tv_usec) / 1000000.0;
printf("execution time: %lf s\n", tt);
}
int main(){
int trial = 64;
int padd = 6;
static int arr1[nn1][nn2];
static int arr2[nn1][nn2];
for (int row = 0; row < nn1; row++){
for (int col = 0; col < nn2; col++){
arr1[row][col] = rand() % 100;
arr2[row][col] = arr1[row][col];
}
}
SOR(n1 + padd, n2 + padd, arr1, arr2, padd, trial);
return 0;
}
|
2ddc27ccb4b31586018d6816525ca1c2d07c9708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
CLAG2Z converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( clag2z_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, SA, ldsa, A, lda );
}
| 2ddc27ccb4b31586018d6816525ca1c2d07c9708.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions mixed zc -> ds
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clat2z and zlaset.
*/
__global__
void clag2z_kernel(
int m, int n,
const magmaFloatComplex *SA, int ldsa,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ));
}
}
}
}
/***************************************************************************//**
Purpose
-------
CLAG2Z converts a single-complex matrix, SA,
to a double-complex matrix, A.
Note that while it is possible to overflow while converting
from double to single, it is not possible to overflow when
converting from single to double.
Arguments
---------
@param[in]
m INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
SA COMPLEX array, dimension (LDSA,N)
On entry, the M-by-N coefficient matrix SA.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
A COMPLEX_16 array, dimension (LDA,N)
On exit, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lag2
*******************************************************************************/
extern "C" void
magmablas_clag2z(
magma_int_t m, magma_int_t n,
magmaFloatComplex_const_ptr SA, magma_int_t ldsa,
magmaDoubleComplex_ptr A, magma_int_t lda,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( m < 0 )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( ldsa < max(1,m) )
*info = -4;
else if ( lda < max(1,m) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
clag2z_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, SA, ldsa, A, lda );
}
|
603f232d6c6e1c47ad4ce01462b6809eaf51b9e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel2 (dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_2, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 2;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
hipDeviceSynchronize ();
t_kernel_2 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2);
double bw = (N * sizeof(dtype)) / (t_kernel_2 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 603f232d6c6e1c47ad4ce01462b6809eaf51b9e9.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* special type of reduction to account for floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel2 (dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x >> 1; s >= 1; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_2, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 2;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel2 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel2 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel2 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
cudaThreadSynchronize ();
t_kernel_2 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2);
double bw = (N * sizeof(dtype)) / (t_kernel_2 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
43f49331d1c6477acaf7b1125e451b94287d4a2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "warpPerPointKernel.cuh"
template <typename T>
__device__ T min(T a, T b, int offset)
{
return a < b ? a : b;
}
__global__ void findNearestWarpPerPointKernel(const value_t* __restrict__ means, value_t *meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
extern __shared__ value_t sharedArray[];
value_t* distances = (value_t*)&sharedArray[blockDim.x * threadIdx.y]; // array of distances to all means
//int sizeCoef = sizeof(unsigned int) / sizeof(value_t);
//unsigned int* minMeansIds = (unsigned int*)&sharedArray[blockDim.y * dimension + blockDim.x * blockDim.y + blockDim.x * threadIdx.y * sizeCoef + threadIdx.x * sizeCoef];
unsigned int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y
, minMeanId = threadIdx.x;
value_t distance = 0
, difference = 0;
//for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
//{
for (my_size_t d = 0; d < dimension; ++d)
{
difference = means[threadIdx.x * dimension + d] - data[pointID * dimension + d];
distance += difference * difference;
}
//if (minDistance > distance)
//{
// minDistance = distance;
// minMeanID = m;
//}
//}
// copy distance to mean to shared memory
distances[threadIdx.x] = distance;
//minMeansIds[threadIdx.x] = minMeanID;
// find the nearest mean id
for (my_size_t t = 0; t < blockDim.x; ++t)
{
// >= guarantee that all threads will compute with same mean (means with higher id are preffered)
if (distance >= distances[t])
{
distance = distances[t];
minMeanId = t;
}
}
// store values to global memory
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT32_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
__global__ void findNearestWarpPerPointSMKernel(const value_t* __restrict__ means, value_t* __restrict__ meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
extern __shared__ value_t sharedArray[];
value_t* point = (value_t*)&sharedArray[threadIdx.y * dimension];
value_t* distances = (value_t*)&sharedArray[blockDim.y * dimension + blockDim.x * threadIdx.y];
unsigned int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y
, minMeanId = threadIdx.x;
// point is copied to shared memory - coalesced acces to global memory, bank-safe save to shared
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
point[d] = data[pointID * dimension + d];
}
value_t distance = 0
, difference = 0;
for (my_size_t d = 0; d < dimension; ++d)
{
// all threads read the same value - multicast
difference = means[threadIdx.x * dimension + d] - point[d];
distance += difference * difference;
}
distances[threadIdx.x] = distance;
__syncthreads();
for (my_size_t t = 0; t < blockDim.x; ++t)
{
// >= guarantee that all threads will compute with same mean (mean with higher id is preffered)
if (distance >= distances[t])
{
distance = distances[t];
minMeanId = t;
}
}
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
#if __CUDA_ARCH__ >= 300
__global__ void findNearestWarpPerPointShuffleKernel(const value_t* __restrict__ means, value_t *meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y;
unsigned int minMeanId = threadIdx.x
, tempMean = 0;
value_t distance = 0
, difference = 0
, tempDistance = 0;
//for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
//{
for (my_size_t d = 0; d < dimension; ++d)
{
difference = means[threadIdx.x * dimension + d] - data[pointID * dimension + d];
distance += difference * difference;
}
//}
//int nearestMeanID = threadIdx.x;
for (size_t i = 1; i < warpSize / 2; i <<= 1)
{
tempDistance = min(distance, __shfl_xor(distance, i));
tempMean = __shfl_xor(minMeanId, i);
if (tempDistance < distance)
{
distance = tempDistance;
minMeanId = tempMean;
}
else if (tempDistance == distance)
{
minMeanId = min(tempMean, minMeanId);
}
}
//distance = min(distance, __shfl_xor(distance, 2));
//distance = min(distance, __shfl_xor(distance, 4));
//distance = min(distance, __shfl_xor(distance, 8));
//distance = min(distance, __shfl_xor(distance, 16));
//if (warpSize > 32) distance = min(distance, __shfl_xor(distance, 32));
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
#endif
| 43f49331d1c6477acaf7b1125e451b94287d4a2b.cu | #include "warpPerPointKernel.cuh"
template <typename T>
__device__ T min(T a, T b, int offset)
{
return a < b ? a : b;
}
__global__ void findNearestWarpPerPointKernel(const value_t* __restrict__ means, value_t *meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
extern __shared__ value_t sharedArray[];
value_t* distances = (value_t*)&sharedArray[blockDim.x * threadIdx.y]; // array of distances to all means
//int sizeCoef = sizeof(unsigned int) / sizeof(value_t);
//unsigned int* minMeansIds = (unsigned int*)&sharedArray[blockDim.y * dimension + blockDim.x * blockDim.y + blockDim.x * threadIdx.y * sizeCoef + threadIdx.x * sizeCoef];
unsigned int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y
, minMeanId = threadIdx.x;
value_t distance = 0
, difference = 0;
//for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
//{
for (my_size_t d = 0; d < dimension; ++d)
{
difference = means[threadIdx.x * dimension + d] - data[pointID * dimension + d];
distance += difference * difference;
}
//if (minDistance > distance)
//{
// minDistance = distance;
// minMeanID = m;
//}
//}
// copy the distance to this thread's mean into shared memory
distances[threadIdx.x] = distance;
//minMeansIds[threadIdx.x] = minMeanID;
// find the nearest mean id
for (my_size_t t = 0; t < blockDim.x; ++t)
{
// >= guarantees that all threads end up with the same mean (means with higher ids are preferred on ties)
if (distance >= distances[t])
{
distance = distances[t];
minMeanId = t;
}
}
// store values to global memory
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT32_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
__global__ void findNearestWarpPerPointSMKernel(const value_t* __restrict__ means, value_t* __restrict__ meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
extern __shared__ value_t sharedArray[];
value_t* point = (value_t*)&sharedArray[threadIdx.y * dimension];
value_t* distances = (value_t*)&sharedArray[blockDim.y * dimension + blockDim.x * threadIdx.y];
unsigned int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y
, minMeanId = threadIdx.x;
// point is copied to shared memory - coalesced access to global memory, bank-conflict-safe store to shared
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
point[d] = data[pointID * dimension + d];
}
value_t distance = 0
, difference = 0;
for (my_size_t d = 0; d < dimension; ++d)
{
// all threads read the same value - multicast
difference = means[threadIdx.x * dimension + d] - point[d];
distance += difference * difference;
}
distances[threadIdx.x] = distance;
__syncthreads();
for (my_size_t t = 0; t < blockDim.x; ++t)
{
// >= guarantees that all threads end up with the same mean (the mean with the higher id is preferred on ties)
if (distance >= distances[t])
{
distance = distances[t];
minMeanId = t;
}
}
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
#if __CUDA_ARCH__ >= 300
__global__ void findNearestWarpPerPointShuffleKernel(const value_t* __restrict__ means, value_t *meansSums, const value_t* __restrict__ data, uint32_t* counts, const my_size_t dimension)
{
int pointID = threadIdx.y + blockIdx.x * blockDim.x * blockDim.y;
unsigned int minMeanId = threadIdx.x
, tempMean = 0;
value_t distance = 0
, difference = 0
, tempDistance = 0;
//for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
//{
for (my_size_t d = 0; d < dimension; ++d)
{
difference = means[threadIdx.x * dimension + d] - data[pointID * dimension + d];
distance += difference * difference;
}
//}
//int nearestMeanID = threadIdx.x;
for (size_t i = 1; i < warpSize / 2; i <<= 1)
{
// exchange the partner lane's distance and mean id and keep the better of the two pairs
tempDistance = __shfl_xor(distance, i);
tempMean = __shfl_xor(minMeanId, i);
if (tempDistance < distance || (tempDistance == distance && tempMean < minMeanId))
{
distance = tempDistance;
minMeanId = tempMean;
}
}
//distance = min(distance, __shfl_xor(distance, 2));
//distance = min(distance, __shfl_xor(distance, 4));
//distance = min(distance, __shfl_xor(distance, 8));
//distance = min(distance, __shfl_xor(distance, 16));
//if (warpSize > 32) distance = min(distance, __shfl_xor(distance, 32));
if (threadIdx.x == 0)
{
atomicInc(&counts[minMeanId], INT_MAX);
}
// utilize all threads
for (my_size_t d = threadIdx.x; d < dimension; d += blockDim.x)
{
atomicAdd(&meansSums[minMeanId * dimension + d], data[pointID * dimension + d]);
}
}
#endif
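// --------------------------------------------------------------------------
// Illustrative host-side launch sketch (an addition for clarity, not part of
// the original interface). findNearestWarpPerPointKernel indexes its shared
// array as blockDim.x distances per point for blockDim.y points per block, so
// the dynamic shared-memory size follows directly from the block shape. The
// grid size and the choice of blockDim.x = number of means are assumptions of
// this sketch, inferred from how the kernel uses threadIdx.x.
static void launchFindNearestWarpPerPoint(const value_t* means, value_t* meansSums,
const value_t* data, uint32_t* counts,
const my_size_t dimension,
const dim3 grid, const dim3 block)
{
// shared memory: one distance slot per mean per point handled by the block
const size_t sharedBytes = block.x * block.y * sizeof(value_t);
findNearestWarpPerPointKernel<<<grid, block, sharedBytes>>>(means, meansSums, data, counts, dimension);
}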
|
1cc5ee36410c88be77bfcfeed3aa0cff2e931113.hip | // !!! This is a file automatically generated by hipify!!!
// DACRT node
// -----------------------------------------------------------------------------
// Copyright (C) 2012, See authors
//
// This program is open source and distributed under the New BSD License. See
// license for more detail.
// -----------------------------------------------------------------------------
#include <Rendering/DacrtNode.h>
#include <Kernels/ForEachWithOwners.h>
#include <HyperCubes.h>
#include <Primitives/SphereCone.h>
#include <Primitives/HyperCube.h>
#include <Rendering/Rays.h>
#include <Rendering/RayContainer.h>
#include <SphereGeometry.h>
#include <SphereContainer.h>
#include <Utils/ToString.h>
#include <sstream>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
#include <thrust/transform.h>
// *** DacrtNode ***
namespace Rendering {
std::string DacrtNode::ToString() const {
std::ostringstream out;
out << "[rays: [" << rayStart << " -> " << rayEnd << "], spheres: [" << sphereStart << " -> " << sphereEnd << "]]";
return out.str();
}
// *** DacrtNodes ***
DacrtNodes::DacrtNodes(const size_t capacity)
: scan1(capacity+1), scan2(capacity+1),
rays(NULL), sphereIndices(NULL),
rayPartitions(capacity), spherePartitions(capacity),
nextRayPartitions(capacity), nextSpherePartitions(capacity),
doneRayPartitions(capacity), doneSpherePartitions(capacity) {
scan1[0] = scan2[0] = 0;
rayPartitions.resize(0); spherePartitions.resize(0);
nextRayPartitions.resize(0); nextSpherePartitions.resize(0);
doneRayPartitions.resize(0); doneSpherePartitions.resize(0);
}
DacrtNodes::~DacrtNodes() {
if (sphereIndices) delete sphereIndices;
}
void DacrtNodes::Reset() {
rayPartitions.resize(0);
nextRayPartitions.resize(0);
spherePartitions.resize(0);
nextSpherePartitions.resize(0);
doneRayPartitions.resize(0);
doneSpherePartitions.resize(0);
}
void DacrtNodes::Create(RayContainer& rs, SpheresGeometry& spheres) {
this->rays = &rs;
rs.Convert(Rays::HyperRayRepresentation);
// Partition rays according to their major axis
uint rayPartitionStart[7];
rs.PartitionByAxis(rayPartitionStart);
std::cout << "ray partitions: ";
for (int p = 0; p < 7; ++p)
std::cout << rayPartitionStart[p] << ", ";
std::cout << std::endl;
static thrust::device_vector<uint2> initialRayPartitions(6);
int activePartitions = 0;
for (int a = 0; a < 6; ++a) {
const size_t rayCount = rayPartitionStart[a+1] - rayPartitionStart[a];
initialRayPartitions[a] = make_uint2(rayPartitionStart[a], rayPartitionStart[a+1]);
activePartitions += rayCount > 0 ? 1 : 0;
}
// Reduce the cube bounds
HyperCubes cubes = HyperCubes(128);
cubes.ReduceCubes(rs.BeginInnerRays(), rs.EndInnerRays(),
initialRayPartitions, activePartitions);
std::cout << cubes << std::endl;
uint spherePartitionStart[activePartitions+1];
if (sphereIndices) delete sphereIndices; // TODO reuse allocated sphere indices instead of destroying
sphereIndices = new SphereContainer(cubes, spheres, spherePartitionStart);
std::cout << "sphere partitions: ";
for (int p = 0; p < activePartitions+1; ++p)
std::cout << spherePartitionStart[p] << ", ";
std::cout << std::endl;
Reset();
int nodeIndex = 0;
for (int a = 0; a < 6; ++a) {
const int rayStart = rayPartitionStart[a];
const size_t rayEnd = rayPartitionStart[a+1];
if (rayStart == rayEnd) continue;
const int sphereStart = spherePartitionStart[nodeIndex];
const size_t sphereEnd = spherePartitionStart[nodeIndex+1];
SetUnfinished(nodeIndex, DacrtNode(rayStart, rayEnd, sphereStart, sphereEnd));
++nodeIndex;
}
unsigned int i = 0;
while (UnfinishedNodes() > 0) {
// std::cout << "\n *** PARTITION NODES (" << i << ") ***\n" << ToString(rs, *sphereIndices) << "\n ***\n" << std::endl;
Partition(rs, *sphereIndices, cubes);
// std::cout << "\n *** PARTITION LEAFS (" << i << ") ***\n" << ToString(rs, *sphereIndices) << "\n ***\n" << std::endl;
if (PartitionLeafs(rs, *sphereIndices))
;//cout << "\n *** AFTER PARTITIONING (" << i << ") ***\n" << nodes/*.ToString(rs, sphereIndices)*/ << "\n ***\n" << endl;
else
;//cout << "\n *** NO LEAFS CREATED (" << i << ") ***\n" << endl;
if (UnfinishedNodes() > 0) {
// Prepare cubes for next round.
cubes.ReduceCubes(rs.BeginInnerRays(), rs.EndInnerRays(),
rayPartitions, UnfinishedNodes());
// std::cout << "cubes after reduction:\n" << cubes << std::endl;
}
++i;
// if (i == 2) exit(0);
}
rs.Convert(Rays::RayRepresentation);
}
struct CalcSplitInfo {
__host__ __device__
thrust::tuple<Axis, float> operator()(thrust::tuple<float2, float2, float2, float2, float2> val) {
float2 x = thrust::get<0>(val);
float range = x.y - x.x;
Axis axis = X;
float split = (x.y + x.x) * 0.5f;
float2 y = thrust::get<1>(val);
float yRange = y.y - y.x;
if (range < yRange) {
axis = Y;
split = (y.y + y.x) * 0.5f;
}
float2 z = thrust::get<2>(val);
float zRange = z.y - z.x;
if (range < zRange) {
axis = Z;
split = (z.y + z.x) * 0.5f;
}
float2 u = thrust::get<3>(val);
float uRange = u.y - u.x;
if (range < uRange) {
axis = U;
split = (u.y + u.x) * 0.5f;
}
float2 v = thrust::get<4>(val);
float vRange = v.y - v.x;
if (range < vRange) {
axis = V;
split = (v.y + v.x) * 0.5f;
}
return thrust::tuple<Axis, float>(axis, split);
}
};
struct RayPartitionSide {
float4 *rayOrigins, *rayAxisUVs;
Axis *splitAxis;
float *splitValues;
// result
PartitionSide *partitionSides;
RayPartitionSide(thrust::device_vector<Axis>& axis, thrust::device_vector<float>& values)
: splitAxis(thrust::raw_pointer_cast(axis.data())),
splitValues(thrust::raw_pointer_cast(values.data())) {}
RayPartitionSide(Rays::Iterator rays,
thrust::device_vector<Axis>& axis, thrust::device_vector<float>& values,
thrust::device_vector<PartitionSide>& sides)
: rayOrigins(RawPointer(Rays::GetOrigins(rays))),
rayAxisUVs(RawPointer(Rays::GetAxisUVs(rays))),
splitAxis(RawPointer(axis)),
splitValues(RawPointer(values)),
partitionSides(RawPointer(sides)) {}
__host__ __device__
PartitionSide operator()(thrust::tuple<thrust::tuple<float4, float4>, unsigned int> ray) {
int owner = thrust::get<1>(ray);
Axis axis = splitAxis[owner];
float splitVal = splitValues[owner];
float rayVals[5];
float4 origin = thrust::get<0>(thrust::get<0>(ray));
rayVals[0] = origin.x;
rayVals[1] = origin.y;
rayVals[2] = origin.z;
float4 UV = thrust::get<1>(thrust::get<0>(ray));
rayVals[3] = UV.y;
rayVals[4] = UV.z;
return rayVals[axis] <= splitVal ? LEFT : RIGHT;
}
__host__ __device__
void operator()(const unsigned int index, const unsigned int owner) const {
const Axis axis = splitAxis[owner];
const float splitVal = splitValues[owner];
// IDEA since most owners will cover a warp or more, perhaps it will be
// slightly faster to branch on (axis < 3) and avoid a memory lookup?
// Only ever so slightly though.
float rayVals[5];
const float3 origin = make_float3(rayOrigins[index]);
rayVals[0] = origin.x;
rayVals[1] = origin.y;
rayVals[2] = origin.z;
const float3 axisUV = make_float3(rayAxisUVs[index]);
rayVals[3] = axisUV.y;
rayVals[4] = axisUV.z;
partitionSides[index] = rayVals[axis] <= splitVal ? LEFT : RIGHT;
}
};
template <int S>
struct SideToOne {
__host__ __device__ unsigned int operator()(PartitionSide s) { return s & S ? 1 : 0; }
};
static SideToOne<LEFT> leftToOne;
static SideToOne<RIGHT> rightToOne;
static thrust::plus<unsigned int> plus;
struct BoolToInt { __host__ __device__ unsigned int operator()(bool b) { return (int)b; } };
struct CreateCones {
__host__ __device__
SphereCone operator()(const thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> c) const {
const HyperCube cube(thrust::get<0>(c), thrust::get<1>(c), thrust::get<2>(c),
thrust::get<3>(c), thrust::get<4>(c), thrust::get<5>(c));
return SphereCone::FromCube(cube);
}
};
__constant__ unsigned int d_oldCubeCount;
struct CubesFromSplitPlanes {
SignedAxis* a;
float2 *x, *y, *z, *u, *v;
Axis* splitAxis;
float* splitValues;
CubesFromSplitPlanes(HyperCubes& cubes, thrust::device_vector<Axis>& sAxis,
thrust::device_vector<float>& sValues)
: a(thrust::raw_pointer_cast(cubes.a.data())),
x(thrust::raw_pointer_cast(cubes.x.data())),
y(thrust::raw_pointer_cast(cubes.y.data())),
z(thrust::raw_pointer_cast(cubes.z.data())),
u(thrust::raw_pointer_cast(cubes.u.data())),
v(thrust::raw_pointer_cast(cubes.v.data())),
splitAxis(thrust::raw_pointer_cast(sAxis.data())),
splitValues(thrust::raw_pointer_cast(sValues.data())) {
unsigned int oldCubeCount = cubes.Size();
hipMemcpyToSymbol(d_oldCubeCount, &oldCubeCount, sizeof(unsigned int));
}
__device__
thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> operator()(const unsigned int threadId) const {
const unsigned int oldCubeId = threadId % d_oldCubeCount;
const PartitionSide side = threadId < d_oldCubeCount ? LEFT : RIGHT;
const Axis sAxis = splitAxis[oldCubeId];
const float splitValue = splitValues[oldCubeId];
return thrust::tuple<SignedAxis, float2, float2, float2, float2, float2>
(a[oldCubeId],
CalcBounds(sAxis == X, side, x[oldCubeId], splitValue),
CalcBounds(sAxis == Y, side, y[oldCubeId], splitValue),
CalcBounds(sAxis == Z, side, z[oldCubeId], splitValue),
CalcBounds(sAxis == U, side, u[oldCubeId], splitValue),
CalcBounds(sAxis == V, side, v[oldCubeId], splitValue));
}
__host__ __device__
inline float2 CalcBounds(const bool split, const PartitionSide side, const float2 bounds, const float splitVal) const {
return split ? make_float2(side == LEFT ? bounds.x : splitVal,
side == RIGHT ? bounds.y : splitVal) : bounds;
}
};
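// Worked example of CalcBounds above (illustrative numbers): splitting the
// bounds [0, 10] at splitVal = 4 yields [0, 4] for the LEFT child and
// [4, 10] for the RIGHT child along the split axis; bounds on all other
// axes are passed through unchanged (split == false).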
struct SpherePartitioningByCones {
SphereCone* cones;
Sphere* spheres;
SpherePartitioningByCones(thrust::device_vector<SphereCone>& cs,
thrust::device_vector<Sphere>& ss)
: cones(thrust::raw_pointer_cast(cs.data())),
spheres(thrust::raw_pointer_cast(ss.data())) {}
__device__
PartitionSide operator()(const unsigned int sphereId, const unsigned int owner) const {
const Sphere sphere = spheres[sphereId];
const SphereCone leftCone = cones[owner];
PartitionSide side = leftCone.DoesIntersect(sphere) ? LEFT : NONE;
const SphereCone rightCone = cones[owner + d_oldCubeCount];
return (PartitionSide)(side | (rightCone.DoesIntersect(sphere) ? RIGHT : NONE));
}
};
__constant__ unsigned int d_spheresMovedLeft;
struct AddSpheresMovedLeft {
AddSpheresMovedLeft(thrust::device_vector<unsigned int>& leftIndices){
unsigned int* spheresMovedLeft = thrust::raw_pointer_cast(leftIndices.data()) + leftIndices.size()-1;
hipMemcpyToSymbol(d_spheresMovedLeft, spheresMovedLeft, sizeof(unsigned int), 0, hipMemcpyDeviceToDevice);
}
__device__
unsigned int operator()(const unsigned int v) const {
return v + d_spheresMovedLeft;
}
};
__constant__ unsigned int d_raysMovedLeft;
struct ComputeNewNodePartitions {
unsigned int* rayLeftIndices;
unsigned int *sphereLeftIndices, *sphereRightIndices;
uint2 *rayPartitions, *spherePartitions;
ComputeNewNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
hipMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, hipMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint4, uint4> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint4 rays;
const unsigned int rBegin = rays.x = rayLeftIndices[rayPartition.x];
const unsigned int rEnd = rays.y = rayLeftIndices[rayPartition.y];
rays.z = rayPartition.x - rBegin + d_raysMovedLeft;
rays.w = rayPartition.y - rEnd + d_raysMovedLeft;
uint4 sphere;
sphere.x = sphereLeftIndices[spherePartition.x];
sphere.y = sphereLeftIndices[spherePartition.y];
sphere.z = sphereRightIndices[spherePartition.x];
sphere.w = sphereRightIndices[spherePartition.y];
return thrust::tuple<uint4, uint4>(rays, sphere);
}
};
struct ComputeNewLeftNodePartitions {
unsigned int* rayLeftIndices;
unsigned int* sphereLeftIndices;
unsigned int* sphereRightIndices;
ComputeNewLeftNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
hipMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, hipMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint2, uint2> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint2 rays;
rays.x = rayLeftIndices[rayPartition.x];
rays.y = rayLeftIndices[rayPartition.y];
uint2 sphere;
sphere.x = sphereLeftIndices[spherePartition.x];
sphere.y = sphereLeftIndices[spherePartition.y];
return thrust::tuple<uint2, uint2>(rays, sphere);
}
};
struct ComputeNewRightNodePartitions {
unsigned int* rayLeftIndices;
unsigned int* sphereLeftIndices;
unsigned int* sphereRightIndices;
ComputeNewRightNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
hipMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, hipMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint2, uint2> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint2 rays;
const unsigned int rBegin = rayLeftIndices[rayPartition.x];
const unsigned int rEnd = rayLeftIndices[rayPartition.y];
rays.x = rayPartition.x - rBegin + d_raysMovedLeft;
rays.y = rayPartition.y - rEnd + d_raysMovedLeft;
uint2 sphere;
sphere.x = sphereRightIndices[spherePartition.x];
sphere.y = sphereRightIndices[spherePartition.y];
return thrust::tuple<uint2, uint2>(rays, sphere);
}
};
void DacrtNodes::Partition(RayContainer& rays, SphereContainer& spheres,
HyperCubes& cubes) {
// TODO move static left and right indices vectors to global scope? Do I
// need more than one at a time?
/// No! But I need to split next ray and sphere partition creation.
size_t rayCount = rays.InnerSize();
// Calculate splitting info
static thrust::device_vector<Axis> splitAxis(cubes.Size());
splitAxis.resize(cubes.Size());
static thrust::device_vector<float> splitValues(cubes.Size());
splitValues.resize(cubes.Size());
thrust::zip_iterator<thrust::tuple<AxisIterator, FloatIterator> > axisInfo
= thrust::make_zip_iterator(thrust::make_tuple(splitAxis.begin(), splitValues.begin()));
CalcSplitInfo calcSplitInfo;
thrust::transform(cubes.BeginBounds(), cubes.EndBounds(), axisInfo, calcSplitInfo);
// Calculate the partition side
static thrust::device_vector<PartitionSide> rayPartitionSides(rayCount);
rayPartitionSides.resize(rayCount);
// Calculate current ray owners. This is apparently faster with the old
// implementation than with a work queue.
#if 0
static thrust::device_vector<unsigned int> rayOwners(rayCount);
rayOwners.resize(rayCount);
CalcOwners(rayPartitions.begin(), rayPartitions.end(), rayOwners);
thrust::zip_iterator<thrust::tuple<Rays::Iterator, UintIterator> > raysWithOwners
= thrust::make_zip_iterator(thrust::make_tuple(rays.BeginInnerRays(), rayOwners.begin()));
RayPartitionSide rayPartitionSide = RayPartitionSide(splitAxis, splitValues);
thrust::transform(raysWithOwners, raysWithOwners + rayCount,
rayPartitionSides.begin(), rayPartitionSide);
#else
RayPartitionSide rayPartitionSide = RayPartitionSide(rays.BeginInnerRays(), splitAxis, splitValues,
rayPartitionSides);
ForEachWithOwners(rayCount,
rayPartitions.begin(), rayPartitions.end(),
rayPartitionSide);
#endif
// Calculate the indices for the rays moved left using scan
static thrust::device_vector<unsigned int> rayLeftIndices(rayCount+1);
rayLeftIndices.resize(rayCount+1);
rayLeftIndices[0] = 0; // Should be handled by resize not being destructive.
thrust::transform_inclusive_scan(rayPartitionSides.begin(), rayPartitionSides.end(),
rayLeftIndices.begin()+1, leftToOne, plus);
// Scatter the rays
rays.Partition(rayPartitionSides, rayLeftIndices);
// Calculate the new hypercubes
/// IDEA: Since the rays have been scattered, just reduce them, but that
// would mean also scattering the hypercubes when creating leaves.
static HyperCubes splitCubes(cubes.Size() * 2);
splitCubes.Resize(cubes.Size() * 2);
CubesFromSplitPlanes cubesFromSplitPlanes(cubes, splitAxis, splitValues);
thrust::transform(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(cubes.Size() * 2),
splitCubes.Begin(), cubesFromSplitPlanes);
// Calculate the cones used for splitting
// TODO using knowledge about the cube split, the resulting two cones can be
// computed faster if computed together in one thread.
static thrust::device_vector<SphereCone> cones(cubes.Size());
cones.resize(cubes.Size() * 2);
thrust::transform(splitCubes.Begin(), splitCubes.End(), cones.begin(), CreateCones());
// Calculate current sphere owners. TODO Use a work queue instead
static thrust::device_vector<unsigned int> sphereOwners(spheres.CurrentSize());
sphereOwners.resize(spheres.CurrentSize());
CalcOwners(spherePartitions.begin(), spherePartitions.end(), sphereOwners);
// Calculate sphere partitions
static thrust::device_vector<PartitionSide> spherePartitionSides(spheres.CurrentSize());
spherePartitionSides.resize(spheres.CurrentSize());
SpherePartitioningByCones spherePartitioningByCones(cones, spheres.SphereGeometry().spheres);
thrust::transform(spheres.BeginCurrentIndices(), spheres.EndCurrentIndices(), sphereOwners.begin(),
spherePartitionSides.begin(), spherePartitioningByCones);
static thrust::device_vector<unsigned int> sphereLeftIndices(spheres.CurrentSize()+1);
sphereLeftIndices.resize(spheres.CurrentSize()+1);
sphereLeftIndices[0] = 0; // Should be handled by resize not being destructive.
static thrust::device_vector<unsigned int> sphereRightIndices(spheres.CurrentSize()+1);
sphereRightIndices.resize(spheres.CurrentSize()+1);
sphereRightIndices[0] = 0; // Should be handled by resize not being destructive.
thrust::transform_inclusive_scan(spherePartitionSides.begin(), spherePartitionSides.end(),
sphereLeftIndices.begin()+1, leftToOne, plus);
thrust::transform_inclusive_scan(spherePartitionSides.begin(), spherePartitionSides.end(),
sphereRightIndices.begin()+1, rightToOne, plus);
AddSpheresMovedLeft addSpheresMovedLeft(sphereLeftIndices);
thrust::transform(sphereRightIndices.begin(), sphereRightIndices.end(), sphereRightIndices.begin(), addSpheresMovedLeft);
// Scatter spheres
spheres.Partition(spherePartitionSides, sphereLeftIndices, sphereRightIndices);
// Compute new dacrt node partitions
unsigned int nextUnfinishedNodes = UnfinishedNodes() * 2;
nextRayPartitions.resize(nextUnfinishedNodes);
nextSpherePartitions.resize(nextUnfinishedNodes);
// Wrap partitions in uint4 to be able to store both left and right
// simultaneously and coalesced. (Hackish, and won't work due to alignment)
// thrust::device_ptr<uint4> nextRays((uint4*)(void*)thrust::raw_pointer_cast(nextRayPartitions.data()));
// thrust::device_ptr<uint4> nextSpheres((uint4*)(void*)thrust::raw_pointer_cast(nextSpherePartitions.data()));
// thrust::zip_iterator<thrust::tuple<thrust::device_ptr<uint4>, thrust::device_ptr<uint4> > > partitionWrapper =
// thrust::make_zip_iterator(thrust::make_tuple(nextRays, nextSpheres));
// ComputeNewNodePartitions computeNewNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
// thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
// partitionWrapper, computeNewNodePartitions);
thrust::zip_iterator<thrust::tuple<Uint2Iterator, Uint2Iterator > > partitionWrapper =
thrust::make_zip_iterator(thrust::make_tuple(nextRayPartitions.begin(), nextSpherePartitions.begin()));
ComputeNewLeftNodePartitions computeNewLeftNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
partitionWrapper, computeNewLeftNodePartitions);
ComputeNewRightNodePartitions computeNewRightNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
partitionWrapper+UnfinishedNodes(), computeNewRightNodePartitions);
rayPartitions.swap(nextRayPartitions);
spherePartitions.swap(nextSpherePartitions);
}
// *** LEAF PARTITIONING ***
struct IsNodeLeaf {
__host__ __device__
bool operator()(const uint2 rayPartition, const uint2 spherePartition) const {
const float rayCount = (float)(rayPartition.y - rayPartition.x);
const float sphereCount = (float)(spherePartition.y - spherePartition.x);
return rayCount * sphereCount <= 32.0f * (rayCount + sphereCount);
}
};
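// Worked example of the leaf criterion above (illustrative numbers): with
// 100 rays and 50 spheres the exhaustive cost is 100 * 50 = 5000, which is
// greater than 32 * (100 + 50) = 4800, so the node is partitioned further;
// with 100 rays and 40 spheres, 4000 <= 32 * 140 = 4480, so it becomes a leaf.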
struct MarkLeafSize {
__host__ __device__
unsigned int operator()(const thrust::tuple<bool, uint2> input) const {
bool isLeaf = thrust::get<0>(input);
uint2 rayPartition = thrust::get<1>(input);
return isLeaf ? rayPartition.y - rayPartition.x : 0;
}
};
__constant__ unsigned int d_leafPartitionOffset;
struct NewPrimPartitions {
uint2 *oldPartitions;
unsigned int *leafIndices;
bool *isLeafs;
unsigned int *newBegins;
uint2 *nextPartitions, *leafPartitions;
NewPrimPartitions(thrust::device_vector<uint2>::iterator oPartitions,
thrust::device_vector<unsigned int>& lIndices,
thrust::device_vector<bool>& isLeafs,
thrust::device_vector<unsigned int>& nBegins,
thrust::device_vector<uint2>& nPartitions,
const unsigned int leafPartitionOffset,
thrust::device_vector<uint2>& lPartitions,
const unsigned int leafOffset)
: oldPartitions(RawPointer(oPartitions)),
leafIndices(RawPointer(lIndices)),
isLeafs(RawPointer(isLeafs)),
newBegins(RawPointer(nBegins)),
nextPartitions(RawPointer(nPartitions)),
leafPartitions(RawPointer(lPartitions) + leafOffset) {
hipMemcpyToSymbol(d_leafPartitionOffset, &leafPartitionOffset, sizeof(unsigned int));
}
__device__
void operator()(const unsigned int threadId) const {
const uint2 oldPartition = oldPartitions[threadId];
const unsigned int range = oldPartition.y - oldPartition.x;
const bool isLeaf = isLeafs[threadId];
unsigned int newBegin = newBegins[oldPartition.x];
newBegin += isLeaf ? d_leafPartitionOffset : 0;
const uint2 partition = make_uint2(newBegin, newBegin + range);
const unsigned int leafIndex = leafIndices[threadId];
const unsigned int index = isLeaf ? leafIndex : threadId - leafIndex;
uint2* output = isLeaf ? leafPartitions : nextPartitions;
output[index] = partition;
}
};
bool DacrtNodes::PartitionLeafs(RayContainer& rays, SphereContainer& spheres) {
static thrust::device_vector<bool> isLeaf(UnfinishedNodes());
isLeaf.resize(UnfinishedNodes());
size_t unfinishedNodes = UnfinishedNodes();
// TODO make isLeaf unsigned int and reuse for indices? isLeaf info is
// stored in an index and its neighbour.
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
isLeaf.begin(), IsNodeLeaf());
// std::cout << "Leaf nodes:\n" << isLeaf << std::endl;
static thrust::device_vector<unsigned int> leafIndices(UnfinishedNodes()+1);
leafIndices.resize(unfinishedNodes+1);
leafIndices[0] = 0;
thrust::transform_inclusive_scan(isLeaf.begin(), isLeaf.end(), leafIndices.begin()+1,
BoolToInt(), plus);
const unsigned int newLeafNodes = leafIndices[leafIndices.size()-1];
const unsigned int oldLeafNodes = DoneNodes();
if (newLeafNodes == 0) return false;
// Partition rays
static thrust::device_vector<unsigned int> rayLeafNodeIndices(unfinishedNodes+1); // TODO could be a globally static vector
rayLeafNodeIndices.resize(unfinishedNodes+1);
rayLeafNodeIndices[0] = 0;
thrust::zip_iterator<thrust::tuple<BoolIterator, Uint2Iterator> > leafNodeValues =
thrust::make_zip_iterator(thrust::make_tuple(isLeaf.begin(), BeginUnfinishedRayPartitions()));
thrust::transform_inclusive_scan(leafNodeValues, leafNodeValues + unfinishedNodes,
rayLeafNodeIndices.begin()+1, MarkLeafSize(), plus);
// std::cout << "Ray Leaf Node Indices:\n" << rayLeafNodeIndices << std::endl;
static thrust::device_vector<unsigned int> owners(rays.InnerSize());
owners.resize(rays.InnerSize());
CalcOwners(rayPartitions.begin(), rayPartitions.end(), owners);
const unsigned int oldRayLeafs = rays.LeafRays();
rays.PartitionLeafs(isLeaf, rayLeafNodeIndices, rayPartitions, owners);
// Owners now hold the new ray begin indices
thrust::device_vector<unsigned int>& newRayIndices = owners;
// New node ray partitions
nextRayPartitions.resize(rayPartitions.size() - newLeafNodes);
doneRayPartitions.resize(doneRayPartitions.size() + newLeafNodes);
thrust::zip_iterator<thrust::tuple<Uint2Iterator, UintIterator, BoolIterator> > nodePartitionsInput =
thrust::make_zip_iterator(thrust::make_tuple(BeginUnfinishedRayPartitions(), leafIndices.begin(), isLeaf.begin()));
NewPrimPartitions newPrimPartitions(BeginUnfinishedRayPartitions(), leafIndices, isLeaf,
newRayIndices, nextRayPartitions, oldRayLeafs, doneRayPartitions, oldLeafNodes);
thrust::for_each(thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(unfinishedNodes),
newPrimPartitions);
rayPartitions.swap(nextRayPartitions);
// Partition spheres
static thrust::device_vector<unsigned int> sphereLeafNodeIndices(unfinishedNodes+1); // TODO could be a globally static vector
sphereLeafNodeIndices.resize(unfinishedNodes+1);
sphereLeafNodeIndices[0] = 0;
leafNodeValues = thrust::make_zip_iterator(thrust::make_tuple(isLeaf.begin(), BeginUnfinishedSpherePartitions()));
thrust::transform_inclusive_scan(leafNodeValues, leafNodeValues + unfinishedNodes,
sphereLeafNodeIndices.begin()+1, MarkLeafSize(), plus);
owners.resize(spheres.CurrentSize());
CalcOwners(spherePartitions.begin(), spherePartitions.end(), owners);
const unsigned int oldSphereLeafs = spheres.DoneSize();
spheres.PartitionLeafs(isLeaf, sphereLeafNodeIndices, spherePartitions, owners);
// New node sphere partitions
nextSpherePartitions.resize(spherePartitions.size() - newLeafNodes);
doneSpherePartitions.resize(doneSpherePartitions.size() + newLeafNodes);
nodePartitionsInput = thrust::make_zip_iterator(thrust::make_tuple(BeginUnfinishedSpherePartitions(), leafIndices.begin(), isLeaf.begin()));
newPrimPartitions = NewPrimPartitions(BeginUnfinishedSpherePartitions(), leafIndices, isLeaf,
owners, nextSpherePartitions, oldSphereLeafs, doneSpherePartitions, oldLeafNodes);
thrust::for_each(thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(unfinishedNodes),
newPrimPartitions);
spherePartitions.swap(nextSpherePartitions);
return true;
}
// *** EXHAUSTIVE INTERSECTION ***
struct ExhaustiveIntersection {
float4 *rayOrigins, *rayAxisUVs;
uint2 *spherePartitions;
unsigned int *sphereIndices;
Sphere *spheres;
unsigned int *hitIDs;
ExhaustiveIntersection(Rays::Iterator raysBegin,
thrust::device_vector<uint2>& sPartitions,
thrust::device_vector<unsigned int>& sIndices,
thrust::device_vector<Sphere>& ss,
thrust::device_vector<unsigned int>& hits)
: rayOrigins(RawPointer(Rays::GetOrigins(raysBegin))),
rayAxisUVs(RawPointer(Rays::GetDirections(raysBegin))),
spherePartitions(RawPointer(sPartitions)),
sphereIndices(RawPointer(sIndices)),
spheres(RawPointer(ss)),
hitIDs(RawPointer(hits)) {}
/**
* Takes a ray as argument and intersects it against all spheres referenced
* by its parent DacrtNode.
*
* Returns the index of the intersected sphere and stores the distance to it
* in the w component of the ray's direction.
*/
__host__ __device__
void operator()(const unsigned int index, const unsigned int owner) const {
const float3 origin = make_float3(rayOrigins[index]);
const float3 dir = make_float3(rayAxisUVs[index]);
const uint2 spherePartition = spherePartitions[owner];
float hitT = 1e30f;
unsigned int hitID = SpheresGeometry::MISSED;
for (unsigned int g = spherePartition.x; g < spherePartition.y; ++g) {
const unsigned int sphereId = sphereIndices[g];
const Sphere s = spheres[sphereId];
const float t = s.Intersect(origin, dir);
if (0 < t && t < hitT) {
hitID = sphereId;
hitT = t;
}
}
rayAxisUVs[index] = make_float4(dir, hitT);
hitIDs[index] = hitID;
}
};
void DacrtNodes::FindIntersections(thrust::device_vector<unsigned int>& hits) {
// std::cout << "FindIntersections" << std::endl;
hits.resize(rays->LeafRays());
// std::cout << "doneRayPartitions:\n" << doneRayPartitions << std::endl;
ExhaustiveIntersection exhaustive(rays->BeginLeafRays(),
doneSpherePartitions,
sphereIndices->doneIndices,
sphereIndices->spheres.spheres,
hits);
ForEachWithOwners(hits.size(),
doneRayPartitions.begin(), doneRayPartitions.end(),
exhaustive);
// std::cout << "hits:\n" << hits << std::endl;
}
// *** CALC OWNERS ***
struct SetMarkers {
unsigned int* owners;
uint2* partitions;
SetMarkers(thrust::device_vector<unsigned int>& owners,
thrust::device_vector<uint2>::iterator partitions)
: owners(RawPointer(owners)),
partitions(RawPointer(partitions)) {}
__host__ __device__
void operator()(const unsigned int threadId) const {
const uint2 part = partitions[threadId];
owners[part.x] = threadId == 0 ? 0 : 1;
}
};
void DacrtNodes::CalcOwners(thrust::device_vector<uint2>::iterator beginPartition,
thrust::device_vector<uint2>::iterator endPartition,
thrust::device_vector<unsigned int>& owners) {
// std::cout << "owner nodes: " << nodes << std::endl;
thrust::fill(owners.begin(), owners.end(), 0);
size_t nodes = endPartition - beginPartition;
if (nodes == 1) return;
// TODO Start the scan at the first marker? The decision whether or not to do
// this would be
/// owners.size() / nodes > X
// for some sane X.
SetMarkers setMarkers(owners, beginPartition);
thrust::counting_iterator<unsigned int> threadIds(0);
thrust::for_each(threadIds, threadIds + nodes, setMarkers);
// std::cout << "markers:\n" << owners << std::endl;
thrust::inclusive_scan(owners.begin(), owners.end(), owners.begin());
// std::cout << "owners:\n" << owners << std::endl;
}
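// Worked example of CalcOwners above (illustrative numbers): for partitions
// [0, 3) and [3, 5) over five elements, SetMarkers produces 0,0,0,1,0 and the
// inclusive scan turns that into owners 0,0,0,1,1, i.e. each element is
// labelled with the index of the partition that contains it.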
void DacrtNodes::ResizeUnfinished(const size_t size) {
rayPartitions.resize(size);
spherePartitions.resize(size);
}
std::string DacrtNodes::ToString() const {
std::ostringstream out;
if (UnfinishedNodes() > 0) {
out << "Unfinished DacrtNodes:";
for (size_t i = 0; i < UnfinishedNodes(); ++i)
out << "\n" << i << ": " << GetUnfinished(i);
if (DoneNodes() > 0) out << "\n";
}
if (DoneNodes() > 0) {
out << "Done DacrtNodes:";
for (size_t i = 0; i < DoneNodes(); ++i)
out << "\n" << i << ": " << GetDone(i);
}
return out.str();
}
std::string DacrtNodes::ToString(RayContainer& rays, SphereContainer& spheres) const {
std::ostringstream out;
if (UnfinishedNodes() > 0) {
out << "Unfinished DacrtNodes:";
for (size_t i = 0; i < UnfinishedNodes(); ++i) {
DacrtNode node = GetUnfinished(i);
out << "\n" << i << ": " << node << "\n Rays: ";
for (unsigned int r = node.rayStart; r < node.rayEnd; ++r){
float4 origins = *(Rays::GetOrigins(rays.BeginInnerRays()) + r);
out << origins.w << ", ";
}
out << "\n Spheres: ";
for (unsigned int s = node.sphereStart; s < node.sphereEnd; ++s){
unsigned int sphereId = *(spheres.BeginCurrentIndices() + s);
out << sphereId << ", ";
}
}
if (DoneNodes() > 0) out << "\n";
}
if (DoneNodes() > 0) {
out << "Done DacrtNodes:";
for (size_t i = 0; i < DoneNodes(); ++i) {
DacrtNode node = GetDone(i);
out << "\n" << i << ": " << node << "\n Rays: ";
for (unsigned int r = node.rayStart; r < node.rayEnd; ++r){
float4 origins = *(Rays::GetOrigins(rays.BeginLeafRays()) + r);
out << origins.w << ", ";
}
out << "\n Spheres: ";
for (unsigned int s = node.sphereStart; s < node.sphereEnd; ++s){
unsigned int sphereId = *(spheres.BeginDoneIndices() + s);
out << sphereId << ", ";
}
}
}
return out.str();
}
} // NS Rendering
| 1cc5ee36410c88be77bfcfeed3aa0cff2e931113.cu | // DACRT node
// -----------------------------------------------------------------------------
// Copyright (C) 2012, See authors
//
// This program is open source and distributed under the New BSD License. See
// license for more detail.
// -----------------------------------------------------------------------------
#include <Rendering/DacrtNode.h>
#include <Kernels/ForEachWithOwners.h>
#include <HyperCubes.h>
#include <Primitives/SphereCone.h>
#include <Primitives/HyperCube.h>
#include <Rendering/Rays.h>
#include <Rendering/RayContainer.h>
#include <SphereGeometry.h>
#include <SphereContainer.h>
#include <Utils/ToString.h>
#include <sstream>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform_scan.h>
#include <thrust/transform.h>
// *** DacrtNode ***
namespace Rendering {
std::string DacrtNode::ToString() const {
std::ostringstream out;
out << "[rays: [" << rayStart << " -> " << rayEnd << "], spheres: [" << sphereStart << " -> " << sphereEnd << "]]";
return out.str();
}
// *** DacrtNodes ***
DacrtNodes::DacrtNodes(const size_t capacity)
: scan1(capacity+1), scan2(capacity+1),
rays(NULL), sphereIndices(NULL),
rayPartitions(capacity), spherePartitions(capacity),
nextRayPartitions(capacity), nextSpherePartitions(capacity),
doneRayPartitions(capacity), doneSpherePartitions(capacity) {
scan1[0] = scan2[0] = 0;
rayPartitions.resize(0); spherePartitions.resize(0);
nextRayPartitions.resize(0); nextSpherePartitions.resize(0);
doneRayPartitions.resize(0); doneSpherePartitions.resize(0);
}
DacrtNodes::~DacrtNodes() {
if (sphereIndices) delete sphereIndices;
}
void DacrtNodes::Reset() {
rayPartitions.resize(0);
nextRayPartitions.resize(0);
spherePartitions.resize(0);
nextSpherePartitions.resize(0);
doneRayPartitions.resize(0);
doneSpherePartitions.resize(0);
}
void DacrtNodes::Create(RayContainer& rs, SpheresGeometry& spheres) {
this->rays = &rs;
rs.Convert(Rays::HyperRayRepresentation);
// Partition rays according to their major axis
uint rayPartitionStart[7];
rs.PartitionByAxis(rayPartitionStart);
std::cout << "ray partitions: ";
for (int p = 0; p < 7; ++p)
std::cout << rayPartitionStart[p] << ", ";
std::cout << std::endl;
static thrust::device_vector<uint2> initialRayPartitions(6);
int activePartitions = 0;
for (int a = 0; a < 6; ++a) {
const size_t rayCount = rayPartitionStart[a+1] - rayPartitionStart[a];
initialRayPartitions[a] = make_uint2(rayPartitionStart[a], rayPartitionStart[a+1]);
activePartitions += rayCount > 0 ? 1 : 0;
}
// Reduce the cube bounds
HyperCubes cubes = HyperCubes(128);
cubes.ReduceCubes(rs.BeginInnerRays(), rs.EndInnerRays(),
initialRayPartitions, activePartitions);
std::cout << cubes << std::endl;
uint spherePartitionStart[activePartitions+1];
if (sphereIndices) delete sphereIndices; // TODO reuse allocated sphere indices instead of destroying
sphereIndices = new SphereContainer(cubes, spheres, spherePartitionStart);
std::cout << "sphere partitions: ";
for (int p = 0; p < activePartitions+1; ++p)
std::cout << spherePartitionStart[p] << ", ";
std::cout << std::endl;
Reset();
int nodeIndex = 0;
for (int a = 0; a < 6; ++a) {
const int rayStart = rayPartitionStart[a];
const size_t rayEnd = rayPartitionStart[a+1];
if (rayStart == rayEnd) continue;
const int sphereStart = spherePartitionStart[nodeIndex];
const size_t sphereEnd = spherePartitionStart[nodeIndex+1];
SetUnfinished(nodeIndex, DacrtNode(rayStart, rayEnd, sphereStart, sphereEnd));
++nodeIndex;
}
unsigned int i = 0;
while (UnfinishedNodes() > 0) {
// std::cout << "\n *** PARTITION NODES (" << i << ") ***\n" << ToString(rs, *sphereIndices) << "\n ***\n" << std::endl;
Partition(rs, *sphereIndices, cubes);
// std::cout << "\n *** PARTITION LEAFS (" << i << ") ***\n" << ToString(rs, *sphereIndices) << "\n ***\n" << std::endl;
if (PartitionLeafs(rs, *sphereIndices))
;//cout << "\n *** AFTER PARTITIONING (" << i << ") ***\n" << nodes/*.ToString(rs, sphereIndices)*/ << "\n ***\n" << endl;
else
;//cout << "\n *** NO LEAFS CREATED (" << i << ") ***\n" << endl;
if (UnfinishedNodes() > 0) {
// Prepare cubes for next round.
cubes.ReduceCubes(rs.BeginInnerRays(), rs.EndInnerRays(),
rayPartitions, UnfinishedNodes());
// std::cout << "cubes after reduction:\n" << cubes << std::endl;
}
++i;
// if (i == 2) exit(0);
}
rs.Convert(Rays::RayRepresentation);
}
struct CalcSplitInfo {
__host__ __device__
thrust::tuple<Axis, float> operator()(thrust::tuple<float2, float2, float2, float2, float2> val) {
float2 x = thrust::get<0>(val);
float range = x.y - x.x;
Axis axis = X;
float split = (x.y + x.x) * 0.5f;
float2 y = thrust::get<1>(val);
float yRange = y.y - y.x;
if (range < yRange) {
axis = Y;
split = (y.y + y.x) * 0.5f;
}
float2 z = thrust::get<2>(val);
float zRange = z.y - z.x;
if (range < zRange) {
axis = Z;
split = (z.y + z.x) * 0.5f;
}
float2 u = thrust::get<3>(val);
float uRange = u.y - u.x;
if (range < uRange) {
axis = U;
split = (u.y + u.x) * 0.5f;
}
float2 v = thrust::get<4>(val);
float vRange = v.y - v.x;
if (range < vRange) {
axis = V;
split = (v.y + v.x) * 0.5f;
}
return thrust::tuple<Axis, float>(axis, split);
}
};
struct RayPartitionSide {
float4 *rayOrigins, *rayAxisUVs;
Axis *splitAxis;
float *splitValues;
// result
PartitionSide *partitionSides;
RayPartitionSide(thrust::device_vector<Axis>& axis, thrust::device_vector<float>& values)
: splitAxis(thrust::raw_pointer_cast(axis.data())),
splitValues(thrust::raw_pointer_cast(values.data())) {}
RayPartitionSide(Rays::Iterator rays,
thrust::device_vector<Axis>& axis, thrust::device_vector<float>& values,
thrust::device_vector<PartitionSide>& sides)
: rayOrigins(RawPointer(Rays::GetOrigins(rays))),
rayAxisUVs(RawPointer(Rays::GetAxisUVs(rays))),
splitAxis(RawPointer(axis)),
splitValues(RawPointer(values)),
partitionSides(RawPointer(sides)) {}
__host__ __device__
PartitionSide operator()(thrust::tuple<thrust::tuple<float4, float4>, unsigned int> ray) {
int owner = thrust::get<1>(ray);
Axis axis = splitAxis[owner];
float splitVal = splitValues[owner];
float rayVals[5];
float4 origin = thrust::get<0>(thrust::get<0>(ray));
rayVals[0] = origin.x;
rayVals[1] = origin.y;
rayVals[2] = origin.z;
float4 UV = thrust::get<1>(thrust::get<0>(ray));
rayVals[3] = UV.y;
rayVals[4] = UV.z;
return rayVals[axis] <= splitVal ? LEFT : RIGHT;
}
__host__ __device__
void operator()(const unsigned int index, const unsigned int owner) const {
const Axis axis = splitAxis[owner];
const float splitVal = splitValues[owner];
// IDEA since most owners will cover a warp or more, perhaps it will be
// slightly faster to branch on (axis < 3) and avoid a memory lookup?
// Only ever so slightly though.
float rayVals[5];
const float3 origin = make_float3(rayOrigins[index]);
rayVals[0] = origin.x;
rayVals[1] = origin.y;
rayVals[2] = origin.z;
const float3 axisUV = make_float3(rayAxisUVs[index]);
rayVals[3] = axisUV.y;
rayVals[4] = axisUV.z;
partitionSides[index] = rayVals[axis] <= splitVal ? LEFT : RIGHT;
}
};
template <int S>
struct SideToOne {
__host__ __device__ unsigned int operator()(PartitionSide s) { return s & S ? 1 : 0; }
};
static SideToOne<LEFT> leftToOne;
static SideToOne<RIGHT> rightToOne;
static thrust::plus<unsigned int> plus;
struct BoolToInt { __host__ __device__ unsigned int operator()(bool b) { return (int)b; } };
struct CreateCones {
__host__ __device__
SphereCone operator()(const thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> c) const {
const HyperCube cube(thrust::get<0>(c), thrust::get<1>(c), thrust::get<2>(c),
thrust::get<3>(c), thrust::get<4>(c), thrust::get<5>(c));
return SphereCone::FromCube(cube);
}
};
__constant__ unsigned int d_oldCubeCount;
struct CubesFromSplitPlanes {
SignedAxis* a;
float2 *x, *y, *z, *u, *v;
Axis* splitAxis;
float* splitValues;
CubesFromSplitPlanes(HyperCubes& cubes, thrust::device_vector<Axis>& sAxis,
thrust::device_vector<float>& sValues)
: a(thrust::raw_pointer_cast(cubes.a.data())),
x(thrust::raw_pointer_cast(cubes.x.data())),
y(thrust::raw_pointer_cast(cubes.y.data())),
z(thrust::raw_pointer_cast(cubes.z.data())),
u(thrust::raw_pointer_cast(cubes.u.data())),
v(thrust::raw_pointer_cast(cubes.v.data())),
splitAxis(thrust::raw_pointer_cast(sAxis.data())),
splitValues(thrust::raw_pointer_cast(sValues.data())) {
unsigned int oldCubeCount = cubes.Size();
cudaMemcpyToSymbol(d_oldCubeCount, &oldCubeCount, sizeof(unsigned int));
}
__device__
thrust::tuple<SignedAxis, float2, float2, float2, float2, float2> operator()(const unsigned int threadId) const {
const unsigned int oldCubeId = threadId % d_oldCubeCount;
const PartitionSide side = threadId < d_oldCubeCount ? LEFT : RIGHT;
const Axis sAxis = splitAxis[oldCubeId];
const float splitValue = splitValues[oldCubeId];
return thrust::tuple<SignedAxis, float2, float2, float2, float2, float2>
(a[oldCubeId],
CalcBounds(sAxis == X, side, x[oldCubeId], splitValue),
CalcBounds(sAxis == Y, side, y[oldCubeId], splitValue),
CalcBounds(sAxis == Z, side, z[oldCubeId], splitValue),
CalcBounds(sAxis == U, side, u[oldCubeId], splitValue),
CalcBounds(sAxis == V, side, v[oldCubeId], splitValue));
}
__host__ __device__
inline float2 CalcBounds(const bool split, const PartitionSide side, const float2 bounds, const float splitVal) const {
return split ? make_float2(side == LEFT ? bounds.x : splitVal,
side == RIGHT ? bounds.y : splitVal) : bounds;
}
};
struct SpherePartitioningByCones {
SphereCone* cones;
Sphere* spheres;
SpherePartitioningByCones(thrust::device_vector<SphereCone>& cs,
thrust::device_vector<Sphere>& ss)
: cones(thrust::raw_pointer_cast(cs.data())),
spheres(thrust::raw_pointer_cast(ss.data())) {}
__device__
PartitionSide operator()(const unsigned int sphereId, const unsigned int owner) const {
const Sphere sphere = spheres[sphereId];
const SphereCone leftCone = cones[owner];
PartitionSide side = leftCone.DoesIntersect(sphere) ? LEFT : NONE;
const SphereCone rightCone = cones[owner + d_oldCubeCount];
return (PartitionSide)(side | (rightCone.DoesIntersect(sphere) ? RIGHT : NONE));
}
};
__constant__ unsigned int d_spheresMovedLeft;
struct AddSpheresMovedLeft {
AddSpheresMovedLeft(thrust::device_vector<unsigned int>& leftIndices){
unsigned int* spheresMovedLeft = thrust::raw_pointer_cast(leftIndices.data()) + leftIndices.size()-1;
cudaMemcpyToSymbol(d_spheresMovedLeft, spheresMovedLeft, sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice);
}
__device__
unsigned int operator()(const unsigned int v) const {
return v + d_spheresMovedLeft;
}
};
__constant__ unsigned int d_raysMovedLeft;
struct ComputeNewNodePartitions {
unsigned int* rayLeftIndices;
unsigned int *sphereLeftIndices, *sphereRightIndices;
uint2 *rayPartitions, *spherePartitions;
ComputeNewNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
cudaMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint4, uint4> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint4 rays;
const unsigned int rBegin = rays.x = rayLeftIndices[rayPartition.x];
const unsigned int rEnd = rays.y = rayLeftIndices[rayPartition.y];
rays.z = rayPartition.x - rBegin + d_raysMovedLeft;
rays.w = rayPartition.y - rEnd + d_raysMovedLeft;
uint4 sphere;
sphere.x = sphereLeftIndices[spherePartition.x];
sphere.y = sphereLeftIndices[spherePartition.y];
sphere.z = sphereRightIndices[spherePartition.x];
sphere.w = sphereRightIndices[spherePartition.y];
return thrust::tuple<uint4, uint4>(rays, sphere);
}
};
struct ComputeNewLeftNodePartitions {
unsigned int* rayLeftIndices;
unsigned int* sphereLeftIndices;
unsigned int* sphereRightIndices;
ComputeNewLeftNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
cudaMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint2, uint2> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint2 rays;
rays.x = rayLeftIndices[rayPartition.x];
rays.y = rayLeftIndices[rayPartition.y];
uint2 sphere;
sphere.x = sphereLeftIndices[spherePartition.x];
sphere.y = sphereLeftIndices[spherePartition.y];
return thrust::tuple<uint2, uint2>(rays, sphere);
}
};
struct ComputeNewRightNodePartitions {
unsigned int* rayLeftIndices;
unsigned int* sphereLeftIndices;
unsigned int* sphereRightIndices;
ComputeNewRightNodePartitions(thrust::device_vector<unsigned int>& rLeftIndices,
thrust::device_vector<unsigned int>& sLeftIndices,
thrust::device_vector<unsigned int>& sRightIndices)
: rayLeftIndices(thrust::raw_pointer_cast(rLeftIndices.data())),
sphereLeftIndices(thrust::raw_pointer_cast(sLeftIndices.data())),
sphereRightIndices(thrust::raw_pointer_cast(sRightIndices.data())) {
unsigned int* data = thrust::raw_pointer_cast(rLeftIndices.data()) + rLeftIndices.size()-1;
cudaMemcpyToSymbol(d_raysMovedLeft, data, sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice);
}
__device__
thrust::tuple<uint2, uint2> operator()(const uint2 rayPartition, const uint2 spherePartition) const {
uint2 rays;
const unsigned int rBegin = rayLeftIndices[rayPartition.x];
const unsigned int rEnd = rayLeftIndices[rayPartition.y];
rays.x = rayPartition.x - rBegin + d_raysMovedLeft;
rays.y = rayPartition.y - rEnd + d_raysMovedLeft;
uint2 sphere;
sphere.x = sphereRightIndices[spherePartition.x];
sphere.y = sphereRightIndices[spherePartition.y];
return thrust::tuple<uint2, uint2>(rays, sphere);
}
};
void DacrtNodes::Partition(RayContainer& rays, SphereContainer& spheres,
HyperCubes& cubes) {
// TODO move static left and right indices vectors to global scope? Do I
// need more than one at a time?
/// No! But I need to split next ray and sphere partition creation.
size_t rayCount = rays.InnerSize();
// Calculate splitting info
static thrust::device_vector<Axis> splitAxis(cubes.Size());
splitAxis.resize(cubes.Size());
static thrust::device_vector<float> splitValues(cubes.Size());
splitValues.resize(cubes.Size());
thrust::zip_iterator<thrust::tuple<AxisIterator, FloatIterator> > axisInfo
= thrust::make_zip_iterator(thrust::make_tuple(splitAxis.begin(), splitValues.begin()));
CalcSplitInfo calcSplitInfo;
thrust::transform(cubes.BeginBounds(), cubes.EndBounds(), axisInfo, calcSplitInfo);
// Calculate the partition side
static thrust::device_vector<PartitionSide> rayPartitionSides(rayCount);
rayPartitionSides.resize(rayCount);
// Calculate current ray owners. This is apparently faster with the old
// implementation than with a work queue.
#if 0
static thrust::device_vector<unsigned int> rayOwners(rayCount);
rayOwners.resize(rayCount);
CalcOwners(rayPartitions.begin(), rayPartitions.end(), rayOwners);
thrust::zip_iterator<thrust::tuple<Rays::Iterator, UintIterator> > raysWithOwners
= thrust::make_zip_iterator(thrust::make_tuple(rays.BeginInnerRays(), rayOwners.begin()));
RayPartitionSide rayPartitionSide = RayPartitionSide(splitAxis, splitValues);
thrust::transform(raysWithOwners, raysWithOwners + rayCount,
rayPartitionSides.begin(), rayPartitionSide);
#else
RayPartitionSide rayPartitionSide = RayPartitionSide(rays.BeginInnerRays(), splitAxis, splitValues,
rayPartitionSides);
ForEachWithOwners(rayCount,
rayPartitions.begin(), rayPartitions.end(),
rayPartitionSide);
#endif
// Calculate the indices for the rays moved left using scan
static thrust::device_vector<unsigned int> rayLeftIndices(rayCount+1);
rayLeftIndices.resize(rayCount+1);
rayLeftIndices[0] = 0; // Should be handled by resize not being destructive.
thrust::transform_inclusive_scan(rayPartitionSides.begin(), rayPartitionSides.end(),
rayLeftIndices.begin()+1, leftToOne, plus);
// Scatter the rays
rays.Partition(rayPartitionSides, rayLeftIndices);
// Calculate the new hypercubes
/// IDEA: Since the rays have been scattered, just reduce them, but that
// would mean also scattering the hypercubes when creating leaves.
static HyperCubes splitCubes(cubes.Size() * 2);
splitCubes.Resize(cubes.Size() * 2);
CubesFromSplitPlanes cubesFromSplitPlanes(cubes, splitAxis, splitValues);
thrust::transform(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(cubes.Size() * 2),
splitCubes.Begin(), cubesFromSplitPlanes);
// Calculate the cones used for splitting
// TODO using knowledge about the cube split, the resulting two cones can be
// computed faster if computed together in one thread.
static thrust::device_vector<SphereCone> cones(cubes.Size());
cones.resize(cubes.Size() * 2);
thrust::transform(splitCubes.Begin(), splitCubes.End(), cones.begin(), CreateCones());
// Calculate current sphere owners. TODO Use a work queue instead
static thrust::device_vector<unsigned int> sphereOwners(spheres.CurrentSize());
sphereOwners.resize(spheres.CurrentSize());
CalcOwners(spherePartitions.begin(), spherePartitions.end(), sphereOwners);
// Calculate sphere partitions
static thrust::device_vector<PartitionSide> spherePartitionSides(spheres.CurrentSize());
spherePartitionSides.resize(spheres.CurrentSize());
SpherePartitioningByCones spherePartitioningByCones(cones, spheres.SphereGeometry().spheres);
thrust::transform(spheres.BeginCurrentIndices(), spheres.EndCurrentIndices(), sphereOwners.begin(),
spherePartitionSides.begin(), spherePartitioningByCones);
static thrust::device_vector<unsigned int> sphereLeftIndices(spheres.CurrentSize()+1);
sphereLeftIndices.resize(spheres.CurrentSize()+1);
sphereLeftIndices[0] = 0; // Should be handled by resize not being destructive.
static thrust::device_vector<unsigned int> sphereRightIndices(spheres.CurrentSize()+1);
sphereRightIndices.resize(spheres.CurrentSize()+1);
sphereRightIndices[0] = 0; // Should be handled by resize not being destructive.
thrust::transform_inclusive_scan(spherePartitionSides.begin(), spherePartitionSides.end(),
sphereLeftIndices.begin()+1, leftToOne, plus);
thrust::transform_inclusive_scan(spherePartitionSides.begin(), spherePartitionSides.end(),
sphereRightIndices.begin()+1, rightToOne, plus);
AddSpheresMovedLeft addSpheresMovedLeft(sphereLeftIndices);
thrust::transform(sphereRightIndices.begin(), sphereRightIndices.end(), sphereRightIndices.begin(), addSpheresMovedLeft);
// Scatter spheres
spheres.Partition(spherePartitionSides, sphereLeftIndices, sphereRightIndices);
// Compute new dacrt node partitions
unsigned int nextUnfinishedNodes = UnfinishedNodes() * 2;
nextRayPartitions.resize(nextUnfinishedNodes);
nextSpherePartitions.resize(nextUnfinishedNodes);
// Wrap partitions in uint4 to be able to store both left and right
// simultaneously and coalesced. (Hackish, and won't work due to alignment)
// thrust::device_ptr<uint4> nextRays((uint4*)(void*)thrust::raw_pointer_cast(nextRayPartitions.data()));
// thrust::device_ptr<uint4> nextSpheres((uint4*)(void*)thrust::raw_pointer_cast(nextSpherePartitions.data()));
// thrust::zip_iterator<thrust::tuple<thrust::device_ptr<uint4>, thrust::device_ptr<uint4> > > partitionWrapper =
// thrust::make_zip_iterator(thrust::make_tuple(nextRays, nextSpheres));
// ComputeNewNodePartitions computeNewNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
// thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
// partitionWrapper, computeNewNodePartitions);
thrust::zip_iterator<thrust::tuple<Uint2Iterator, Uint2Iterator > > partitionWrapper =
thrust::make_zip_iterator(thrust::make_tuple(nextRayPartitions.begin(), nextSpherePartitions.begin()));
ComputeNewLeftNodePartitions computeNewLeftNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
partitionWrapper, computeNewLeftNodePartitions);
ComputeNewRightNodePartitions computeNewRightNodePartitions(rayLeftIndices, sphereLeftIndices, sphereRightIndices);
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
partitionWrapper+UnfinishedNodes(), computeNewRightNodePartitions);
rayPartitions.swap(nextRayPartitions);
spherePartitions.swap(nextSpherePartitions);
}
// *** LEAF PARTITIONING ***
struct IsNodeLeaf {
__host__ __device__
bool operator()(const uint2 rayPartition, const uint2 spherePartition) const {
const float rayCount = (float)(rayPartition.y - rayPartition.x);
const float sphereCount = (float)(spherePartition.y - spherePartition.x);
return rayCount * sphereCount <= 32.0f * (rayCount + sphereCount);
}
};
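// Editorial illustration of the heuristic above (the numbers are made up): a node
// with 100 rays and 40 spheres gives 100*40 = 4000 <= 32*(100+40) = 4480, so
// exhaustive testing is considered cheaper and the node becomes a leaf; with
// 100 rays and 50 spheres, 5000 > 4800, so the node is split again.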
struct MarkLeafSize {
__host__ __device__
unsigned int operator()(const thrust::tuple<bool, uint2> input) const {
bool isLeaf = thrust::get<0>(input);
uint2 rayPartition = thrust::get<1>(input);
return isLeaf ? rayPartition.y - rayPartition.x : 0;
}
};
__constant__ unsigned int d_leafPartitionOffset;
struct NewPrimPartitions {
uint2 *oldPartitions;
unsigned int *leafIndices;
bool *isLeafs;
unsigned int *newBegins;
uint2 *nextPartitions, *leafPartitions;
NewPrimPartitions(thrust::device_vector<uint2>::iterator oPartitions,
thrust::device_vector<unsigned int>& lIndices,
thrust::device_vector<bool>& isLeafs,
thrust::device_vector<unsigned int>& nBegins,
thrust::device_vector<uint2>& nPartitions,
const unsigned int leafPartitionOffset,
thrust::device_vector<uint2>& lPartitions,
const unsigned int leafOffset)
: oldPartitions(RawPointer(oPartitions)),
leafIndices(RawPointer(lIndices)),
isLeafs(RawPointer(isLeafs)),
newBegins(RawPointer(nBegins)),
nextPartitions(RawPointer(nPartitions)),
leafPartitions(RawPointer(lPartitions) + leafOffset) {
cudaMemcpyToSymbol(d_leafPartitionOffset, &leafPartitionOffset, sizeof(unsigned int));
}
__device__
void operator()(const unsigned int threadId) const {
const uint2 oldPartition = oldPartitions[threadId];
const unsigned int range = oldPartition.y - oldPartition.x;
const bool isLeaf = isLeafs[threadId];
unsigned int newBegin = newBegins[oldPartition.x];
newBegin += isLeaf ? d_leafPartitionOffset : 0;
const uint2 partition = make_uint2(newBegin, newBegin + range);
const unsigned int leafIndex = leafIndices[threadId];
const unsigned int index = isLeaf ? leafIndex : threadId - leafIndex;
uint2* output = isLeaf ? leafPartitions : nextPartitions;
output[index] = partition;
}
};
bool DacrtNodes::PartitionLeafs(RayContainer& rays, SphereContainer& spheres) {
static thrust::device_vector<bool> isLeaf(UnfinishedNodes());
isLeaf.resize(UnfinishedNodes());
size_t unfinishedNodes = UnfinishedNodes();
// TODO make isLeaf unsigned int and reuse for indices? isLeaf info is
// stored in an index and its neighbour.
thrust::transform(BeginUnfinishedRayPartitions(), EndUnfinishedRayPartitions(), BeginUnfinishedSpherePartitions(),
isLeaf.begin(), IsNodeLeaf());
// std::cout << "Leaf nodes:\n" << isLeaf << std::endl;
static thrust::device_vector<unsigned int> leafIndices(UnfinishedNodes()+1);
leafIndices.resize(unfinishedNodes+1);
leafIndices[0] = 0;
thrust::transform_inclusive_scan(isLeaf.begin(), isLeaf.end(), leafIndices.begin()+1,
BoolToInt(), plus);
const unsigned int newLeafNodes = leafIndices[leafIndices.size()-1];
const unsigned int oldLeafNodes = DoneNodes();
if (newLeafNodes == 0) return false;
// Partition rays
static thrust::device_vector<unsigned int> rayLeafNodeIndices(unfinishedNodes+1); // TODO could be a globally static vector
rayLeafNodeIndices.resize(unfinishedNodes+1);
rayLeafNodeIndices[0] = 0;
thrust::zip_iterator<thrust::tuple<BoolIterator, Uint2Iterator> > leafNodeValues =
thrust::make_zip_iterator(thrust::make_tuple(isLeaf.begin(), BeginUnfinishedRayPartitions()));
thrust::transform_inclusive_scan(leafNodeValues, leafNodeValues + unfinishedNodes,
rayLeafNodeIndices.begin()+1, MarkLeafSize(), plus);
// std::cout << "Ray Leaf Node Indices:\n" << rayLeafNodeIndices << std::endl;
static thrust::device_vector<unsigned int> owners(rays.InnerSize());
owners.resize(rays.InnerSize());
CalcOwners(rayPartitions.begin(), rayPartitions.end(), owners);
const unsigned int oldRayLeafs = rays.LeafRays();
rays.PartitionLeafs(isLeaf, rayLeafNodeIndices, rayPartitions, owners);
// Owners now hold the new ray begin indices
thrust::device_vector<unsigned int>& newRayIndices = owners;
// New node ray partitions
nextRayPartitions.resize(rayPartitions.size() - newLeafNodes);
doneRayPartitions.resize(doneRayPartitions.size() + newLeafNodes);
thrust::zip_iterator<thrust::tuple<Uint2Iterator, UintIterator, BoolIterator> > nodePartitionsInput =
thrust::make_zip_iterator(thrust::make_tuple(BeginUnfinishedRayPartitions(), leafIndices.begin(), isLeaf.begin()));
NewPrimPartitions newPrimPartitions(BeginUnfinishedRayPartitions(), leafIndices, isLeaf,
newRayIndices, nextRayPartitions, oldRayLeafs, doneRayPartitions, oldLeafNodes);
thrust::for_each(thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(unfinishedNodes),
newPrimPartitions);
rayPartitions.swap(nextRayPartitions);
// Partition spheres
static thrust::device_vector<unsigned int> sphereLeafNodeIndices(unfinishedNodes+1); // TODO could be a globally static vector
sphereLeafNodeIndices.resize(unfinishedNodes+1);
sphereLeafNodeIndices[0] = 0;
leafNodeValues = thrust::make_zip_iterator(thrust::make_tuple(isLeaf.begin(), BeginUnfinishedSpherePartitions()));
thrust::transform_inclusive_scan(leafNodeValues, leafNodeValues + unfinishedNodes,
sphereLeafNodeIndices.begin()+1, MarkLeafSize(), plus);
owners.resize(spheres.CurrentSize());
CalcOwners(spherePartitions.begin(), spherePartitions.end(), owners);
const unsigned int oldSphereLeafs = spheres.DoneSize();
spheres.PartitionLeafs(isLeaf, sphereLeafNodeIndices, spherePartitions, owners);
// New node sphere partitions
nextSpherePartitions.resize(spherePartitions.size() - newLeafNodes);
doneSpherePartitions.resize(doneSpherePartitions.size() + newLeafNodes);
nodePartitionsInput = thrust::make_zip_iterator(thrust::make_tuple(BeginUnfinishedSpherePartitions(), leafIndices.begin(), isLeaf.begin()));
newPrimPartitions = NewPrimPartitions(BeginUnfinishedSpherePartitions(), leafIndices, isLeaf,
owners, nextSpherePartitions, oldSphereLeafs, doneSpherePartitions, oldLeafNodes);
thrust::for_each(thrust::counting_iterator<unsigned int>(0),
thrust::counting_iterator<unsigned int>(unfinishedNodes),
newPrimPartitions);
spherePartitions.swap(nextSpherePartitions);
return true;
}
// *** EXHAUSTIVE INTERSECTION ***
struct ExhaustiveIntersection {
float4 *rayOrigins, *rayAxisUVs;
uint2 *spherePartitions;
unsigned int *sphereIndices;
Sphere *spheres;
unsigned int *hitIDs;
ExhaustiveIntersection(Rays::Iterator raysBegin,
thrust::device_vector<uint2>& sPartitions,
thrust::device_vector<unsigned int>& sIndices,
thrust::device_vector<Sphere>& ss,
thrust::device_vector<unsigned int>& hits)
: rayOrigins(RawPointer(Rays::GetOrigins(raysBegin))),
rayAxisUVs(RawPointer(Rays::GetDirections(raysBegin))),
spherePartitions(RawPointer(sPartitions)),
sphereIndices(RawPointer(sIndices)),
spheres(RawPointer(ss)),
hitIDs(RawPointer(hits)) {}
/**
* Takes a ray as argument and intersects it against all spheres referenced
* by its parent DacrtNode.
*
* Stores the index of the intersected sphere in hitIDs and the distance to it
* in the w component of the ray's direction.
*/
__host__ __device__
void operator()(const unsigned int index, const unsigned int owner) const {
const float3 origin = make_float3(rayOrigins[index]);
const float3 dir = make_float3(rayAxisUVs[index]);
const uint2 spherePartition = spherePartitions[owner];
float hitT = 1e30f;
unsigned int hitID = SpheresGeometry::MISSED;
for (unsigned int g = spherePartition.x; g < spherePartition.y; ++g) {
const unsigned int sphereId = sphereIndices[g];
const Sphere s = spheres[sphereId];
const float t = s.Intersect(origin, dir);
if (0 < t && t < hitT) {
hitID = sphereId;
hitT = t;
}
}
rayAxisUVs[index] = make_float4(dir, hitT);
hitIDs[index] = hitID;
}
};
void DacrtNodes::FindIntersections(thrust::device_vector<unsigned int>& hits) {
// std::cout << "FindIntersections" << std::endl;
hits.resize(rays->LeafRays());
// std::cout << "doneRayPartitions:\n" << doneRayPartitions << std::endl;
ExhaustiveIntersection exhaustive(rays->BeginLeafRays(),
doneSpherePartitions,
sphereIndices->doneIndices,
sphereIndices->spheres.spheres,
hits);
ForEachWithOwners(hits.size(),
doneRayPartitions.begin(), doneRayPartitions.end(),
exhaustive);
// std::cout << "hits:\n" << hits << std::endl;
}
// *** CALC OWNERS ***
struct SetMarkers {
unsigned int* owners;
uint2* partitions;
SetMarkers(thrust::device_vector<unsigned int>& owners,
thrust::device_vector<uint2>::iterator partitions)
: owners(RawPointer(owners)),
partitions(RawPointer(partitions)) {}
__host__ __device__
void operator()(const unsigned int threadId) const {
const uint2 part = partitions[threadId];
owners[part.x] = threadId == 0 ? 0 : 1;
}
};
void DacrtNodes::CalcOwners(thrust::device_vector<uint2>::iterator beginPartition,
thrust::device_vector<uint2>::iterator endPartition,
thrust::device_vector<unsigned int>& owners) {
// std::cout << "owner nodes: " << nodes << std::endl;
thrust::fill(owners.begin(), owners.end(), 0);
size_t nodes = endPartition - beginPartition;
if (nodes == 1) return;
// TODO Start the scan at first marker? The decision whether or not to do
// this would be
/// owners.size() / nodes > X
// for some sane X.
SetMarkers setMarkers(owners, beginPartition);
thrust::counting_iterator<unsigned int> threadIds(0);
thrust::for_each(threadIds, threadIds + nodes, setMarkers);
// std::cout << "markers:\n" << owners << std::endl;
thrust::inclusive_scan(owners.begin(), owners.end(), owners.begin());
// std::cout << "owners:\n" << owners << std::endl;
}
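// Worked example of the marker/scan trick above (illustrative, not from the
// original source): with partitions {0,4}, {4,9}, {9,12} over 12 elements,
// SetMarkers leaves owners = [0,0,0,0, 1,0,0,0,0, 1,0,0] and the inclusive
// scan turns that into owners = [0,0,0,0, 1,1,1,1,1, 2,2,2], i.e. the index
// of the node each element belongs to.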
void DacrtNodes::ResizeUnfinished(const size_t size) {
rayPartitions.resize(size);
spherePartitions.resize(size);
}
std::string DacrtNodes::ToString() const {
std::ostringstream out;
if (UnfinishedNodes() > 0) {
out << "Unfinished DacrtNodes:";
for (size_t i = 0; i < UnfinishedNodes(); ++i)
out << "\n" << i << ": " << GetUnfinished(i);
if (DoneNodes() > 0) out << "\n";
}
if (DoneNodes() > 0) {
out << "Done DacrtNodes:";
for (size_t i = 0; i < DoneNodes(); ++i)
out << "\n" << i << ": " << GetDone(i);
}
return out.str();
}
std::string DacrtNodes::ToString(RayContainer& rays, SphereContainer& spheres) const {
std::ostringstream out;
if (UnfinishedNodes() > 0) {
out << "Unfinished DacrtNodes:";
for (size_t i = 0; i < UnfinishedNodes(); ++i) {
DacrtNode node = GetUnfinished(i);
out << "\n" << i << ": " << node << "\n Rays: ";
for (unsigned int r = node.rayStart; r < node.rayEnd; ++r){
float4 origins = *(Rays::GetOrigins(rays.BeginInnerRays()) + r);
out << origins.w << ", ";
}
out << "\n Spheres: ";
for (unsigned int s = node.sphereStart; s < node.sphereEnd; ++s){
unsigned int sphereId = *(spheres.BeginCurrentIndices() + s);
out << sphereId << ", ";
}
}
if (DoneNodes() > 0) out << "\n";
}
if (DoneNodes() > 0) {
out << "Done DacrtNodes:";
for (size_t i = 0; i < DoneNodes(); ++i) {
DacrtNode node = GetDone(i);
out << "\n" << i << ": " << node << "\n Rays: ";
for (unsigned int r = node.rayStart; r < node.rayEnd; ++r){
float4 origins = *(Rays::GetOrigins(rays.BeginLeafRays()) + r);
out << origins.w << ", ";
}
out << "\n Spheres: ";
for (unsigned int s = node.sphereStart; s < node.sphereEnd; ++s){
unsigned int sphereId = *(spheres.BeginDoneIndices() + s);
out << sphereId << ", ";
}
}
}
return out.str();
}
} // NS Rendering
|
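The partitioning code above repeatedly uses one idiom: a transform_inclusive_scan whose output starts one element past the beginning of the index vector, so entry i ends up holding the number of elements that moved left before i. The sketch below isolates that idiom; it is a minimal, self-contained illustration, and the functor and function names (LeftToOne, ComputeLeftIndices) are placeholders rather than names from the source files.

#include <thrust/device_vector.h>
#include <thrust/transform_scan.h>
#include <thrust/functional.h>

// In this sketch, side == 0 means "goes left", anything else means "goes right".
struct LeftToOne {
    __host__ __device__ unsigned int operator()(int side) const {
        return side == 0 ? 1u : 0u;
    }
};

// leftIndices[i] = number of left-going elements in sides[0..i-1], which can be
// used directly as the destination offset when scattering to the left side.
void ComputeLeftIndices(const thrust::device_vector<int>& sides,
                        thrust::device_vector<unsigned int>& leftIndices) {
    leftIndices.resize(sides.size() + 1);
    leftIndices[0] = 0;
    thrust::transform_inclusive_scan(sides.begin(), sides.end(),
                                     leftIndices.begin() + 1,
                                     LeftToOne(), thrust::plus<unsigned int>());
}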
2911bf90f8a99f7513d014144ec342208dd4bf5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "calc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_D = NULL;
hipMalloc(&d_D, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((calc), dim3(gridBlock), dim3(threadBlock), 0, 0, d_D, n, k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((calc), dim3(gridBlock), dim3(threadBlock), 0, 0, d_D, n, k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((calc), dim3(gridBlock), dim3(threadBlock), 0, 0, d_D, n, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 2911bf90f8a99f7513d014144ec342208dd4bf5a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "calc.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_D = NULL;
cudaMalloc(&d_D, XSIZE*YSIZE*sizeof(float));
int n = XSIZE*YSIZE;
int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
calc<<<gridBlock,threadBlock>>>(d_D,n,k);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
calc<<<gridBlock,threadBlock>>>(d_D,n,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
calc<<<gridBlock,threadBlock>>>(d_D,n,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
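One thing worth noting about the harness above: the timed loop issues 1000 asynchronous launches but stops the steady_clock before any device synchronization, so it can end up measuring little more than enqueue overhead. The sketch below shows the more usual pattern; it is an illustrative helper written against the plain CUDA runtime API, and the helper name is made up.

#include <chrono>
#include <cuda_runtime.h>

// Returns the average per-launch time in milliseconds over `iterations` launches.
template <typename LaunchFn>
float AverageKernelTimeMs(LaunchFn launch, int iterations) {
    launch();                     // one warm-up launch
    cudaDeviceSynchronize();      // make sure the warm-up has finished
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i) launch();
    cudaDeviceSynchronize();      // wait for all timed launches before stopping the clock
    auto end = std::chrono::steady_clock::now();
    return std::chrono::duration<float, std::milli>(end - start).count() / iterations;
}

Called as AverageKernelTimeMs([&]{ calc<<<gridBlock, threadBlock>>>(d_D, n, k); }, 1000), this reports kernel execution time rather than launch overhead.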
acd9bb24ce0ff1edfe0550af99d6befdd569fcd2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
using namespace std;
struct Address
{
int numa;
int numb;
};
__global__ void pairhmm( Address * address, int * result_d)
{
clock_t start_time=clock();
int warp_index=threadIdx.x/32;
int numa=address[warp_index]. numa;
int numb=address[warp_index]. numb;
int result=0;
int round=0;
for(round=0;round<2;round++)
{
for(int i=0;i<numa;i++)
{
if(threadIdx.x%32==0) printf("round=%d warp %d numa=%d i=%d \n",round, warp_index, numa,i);
for(int j=0;j<numb;j++)
{
if(threadIdx.x%32==0) printf("warp %d numb=%d j=%d \n", warp_index, numb,j);
result+=i+j*2;
}
}
if(threadIdx.x%32==0) printf("round=%d warp %d endendend \n",round, warp_index);
result_d[threadIdx.x]=result;
}
clock_t finish_time=clock();
int time=(int)( finish_time-start_time);
if(threadIdx.x%32==0) printf("%d\n", time);
}
int main()
{
Address * address;
address=(Address *)malloc(sizeof(Address)* 4);
address[0].numa=2;
address[0].numb=2;
address[1].numa=4;
address[1].numb=4;
address[2].numa=6;
address[2].numb=6;
address[3].numa=8;
address[3].numb=8;
Address * address_d;
hipMalloc( (Address **)&address_d,sizeof(int) *100 );
hipMemcpy(address_d,address,4*sizeof(Address), hipMemcpyHostToDevice);
int blocksize=64;
int gridsize=1;
int *result_h;
int *result_d;
result_h=(int *) malloc( sizeof(int)* 128);
hipMalloc( (int **)&result_d,sizeof(int) *128);
hipLaunchKernelGGL(( pairhmm), dim3(gridsize),dim3(blocksize), 0, 0, address_d,result_d);
hipMemcpy(result_h,result_d,128*sizeof(int), hipMemcpyDeviceToHost);
// for(int i=0;i<128;i++)
// printf("index= %d %d\n", i, result_h[i]);
hipDeviceSynchronize();
return 0;
}
| acd9bb24ce0ff1edfe0550af99d6befdd569fcd2.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
using namespace std;
struct Address
{
int numa;
int numb;
};
__global__ void pairhmm( Address * address, int * result_d)
{
clock_t start_time=clock();
int warp_index=threadIdx.x/32;
int numa=address[warp_index]. numa;
int numb=address[warp_index]. numb;
int result=0;
int round=0;
for(round=0;round<2;round++)
{
for(int i=0;i<numa;i++)
{
if(threadIdx.x%32==0) printf("round=%d warp %d numa=%d i=%d \n",round, warp_index, numa,i);
for(int j=0;j<numb;j++)
{
if(threadIdx.x%32==0) printf("warp %d numb=%d j=%d \n", warp_index, numb,j);
result+=i+j*2;
}
}
if(threadIdx.x%32==0) printf("round=%d warp %d endendend \n",round, warp_index);
result_d[threadIdx.x]=result;
}
clock_t finish_time=clock();
int time=(int)( finish_time-start_time);
if(threadIdx.x%32==0) printf("%d\n", time);
}
int main()
{
Address * address;
address=(Address *)malloc(sizeof(Address)* 4);
address[0].numa=2;
address[0].numb=2;
address[1].numa=4;
address[1].numb=4;
address[2].numa=6;
address[2].numb=6;
address[3].numa=8;
address[3].numb=8;
Address * address_d;
cudaMalloc( (Address **)&address_d,sizeof(int) *100 );
cudaMemcpy(address_d,address,4*sizeof(Address), cudaMemcpyHostToDevice);
int blocksize=64;
int gridsize=1;
int *result_h;
int *result_d;
result_h=(int *) malloc( sizeof(int)* 128);
cudaMalloc( (int **)&result_d,sizeof(int) *128);
pairhmm<<<gridsize,blocksize>>>(address_d,result_d);
cudaMemcpy(result_h,result_d,128*sizeof(int), cudaMemcpyDeviceToHost);
// for(int i=0;i<128;i++)
// printf("index= %d %d\n", i, result_h[i]);
cudaDeviceSynchronize();
return 0;
}
|
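The kernel above prints raw clock() cycle counts. If wall-clock numbers are wanted, the usual follow-up is to divide by the SM clock rate reported by the runtime; a small illustrative host program (not part of the original file pair) would look like this:

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // prop.clockRate is in kHz, so cycles / clockRate yields milliseconds.
    long long cycles = 123456;  // stand-in for a value printed by the kernel
    double ms = static_cast<double>(cycles) / prop.clockRate;
    printf("%lld cycles is about %f ms at %d kHz\n", cycles, ms, prop.clockRate);
    return 0;
}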
92f466cbdc811f26010dc619c5e8741aa644bb12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_2_back;
int xdim0_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_2_back;
int ydim0_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_2_back;
int xdim1_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_2_back;
int ydim1_update_halo_kernel2_zvel_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_2_back*(y)+xdim0_update_halo_kernel2_zvel_minus_2_back*ydim0_update_halo_kernel2_zvel_minus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_2_back*(y)+xdim1_update_halo_kernel2_zvel_minus_2_back*ydim1_update_halo_kernel2_zvel_minus_2_back*(z))
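// Editorial note on the macros above (values are illustrative): with
// xdim0_update_halo_kernel2_zvel_minus_2_back = 10 and
// ydim0_update_halo_kernel2_zvel_minus_2_back = 8, OPS_ACC0(1,2,3) expands to
// 1 + 10*2 + 10*8*3 = 261, a plain row-major offset relative to the pointer
// that the wrapper kernel has already advanced to the thread's base element.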
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_back_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,2)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_minus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_minus_2_back * ydim0_update_halo_kernel2_zvel_minus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_minus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_minus_2_back * ydim1_update_halo_kernel2_zvel_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,57)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_2_back");
OPS_kernels[57].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_2_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_2_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_2_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_2_back_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_minus_2_back_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_minus_2_back_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_minus_2_back_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[57].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[57].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[57].mpi_time += t2-t1;
OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 57;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 57;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| 92f466cbdc811f26010dc619c5e8741aa644bb12.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_2_back;
int xdim0_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_2_back;
int ydim0_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_2_back;
int xdim1_update_halo_kernel2_zvel_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_2_back;
int ydim1_update_halo_kernel2_zvel_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_minus_2_back*(y)+xdim0_update_halo_kernel2_zvel_minus_2_back*ydim0_update_halo_kernel2_zvel_minus_2_back*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_minus_2_back*(y)+xdim1_update_halo_kernel2_zvel_minus_2_back*ydim1_update_halo_kernel2_zvel_minus_2_back*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_back_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = -zvel0[OPS_ACC0(0,0,2)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = -zvel1[OPS_ACC1(0,0,2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_minus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_minus_2_back * ydim0_update_halo_kernel2_zvel_minus_2_back;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_minus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_minus_2_back * ydim1_update_halo_kernel2_zvel_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_2_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,57)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_2_back");
OPS_kernels[57].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_2_back_h || ydim0 != ydim0_update_halo_kernel2_zvel_minus_2_back_h || xdim1 != xdim1_update_halo_kernel2_zvel_minus_2_back_h || ydim1 != ydim1_update_halo_kernel2_zvel_minus_2_back_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_minus_2_back, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_minus_2_back_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_minus_2_back, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_minus_2_back_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_minus_2_back, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_minus_2_back_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_minus_2_back, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[57].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_minus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[57].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[57].mpi_time += t2-t1;
OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[57].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 57;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 57;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(57,"update_halo_kernel2_zvel_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
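The generated host stub above re-uploads the xdim/ydim constants only when they differ from host-side shadow copies, avoiding a cudaMemcpyToSymbol on every loop invocation. Reduced to its essentials, the pattern looks like the sketch below; the symbol and function names are placeholders and not part of the OPS API.

#include <cuda_runtime.h>

__constant__ int d_xdim;        // device-side copy read by kernels
static int h_xdim_cached = -1;  // host-side shadow of what was last uploaded

// Upload the dimension only when it actually changed, mirroring the generated stub.
void SetXDim(int xdim) {
    if (xdim != h_xdim_cached) {
        cudaMemcpyToSymbol(d_xdim, &xdim, sizeof(int));
        h_xdim_cached = xdim;
    }
}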
a67fdcd6088baf21e107d487dc9f3228e7a6a004.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <assert.h>
#include <unistd.h>
#define MPI_SUPPORT
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef MPI_SUPPORT
#include "mpi.h"
#endif
#include "../util/util.cuh"
#include "../util/error_util.cuh"
#include "../util/nvshmem_util.cuh"
#include "../util/time.cuh"
#include "../comm/csr.cuh"
#include "../comm/partition.cuh"
#include "bfs.cuh"
#include "../comm/agent_maxcount.cuh"
#include "validation.cuh"
//#define INTER_BATCH_SIZE (8)
//#define WAIT_TIMES (4)
//#define FETCH_SIZE (32)
#define BLOCK_SIZE (512)
#define PADDING_SIZE (32)
uint32_t sum(uint32_t *array, int n) {
uint32_t total=0;
for(int i=0; i<n; i++)
total += array[i];
return total;
}
void print_recv(int my_pe, uint32_t *array, int n) {
printf("pe %d receive:", my_pe);
for(int i=0; i<n; i++)
printf(" %6d", array[i]);
printf("\n");
}
int main(int argc, char *argv[])
{
//---------------------Pass from command ---------------//
char *input_file = NULL;
int min_iter = -1;
int source = 0;
int option = 1;
int num_queue=1;
int rtq_pe = 1;
bool verbose = 0;
float ratio = 1;
int partition_idx = 0;
int device = 0;
char * metis_file=NULL;
bool ifcheck=false;
int rounds=1;
if(argc == 1)
{
std::cout<< "./test -file <file> -r <runtime queue per pe=1> -iter <min iteration for queue=2500> -source <source node to start=0> \
-q <number of queues used=1> -v <verbose=true> -ratio <ratio=1> \
-partition <partition 0=vertex partition, 1=edge partition, 2=random partition, 3=metis> -d <start device=0>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(std::string(argv[i]) == "-file")
input_file = argv[i+1];
else if(std::string(argv[i]) == "-source")
source = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-iter")
min_iter = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-o")
option = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-r")
rtq_pe = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-q")
num_queue= std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-v")
verbose= std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-ratio")
ratio = std::stof(argv[i+1]);
else if(std::string(argv[i]) == "-partition")
partition_idx = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-d")
device = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-fmetis")
metis_file = argv[i+1];
else if(std::string(argv[i]) == "-check")
ifcheck = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-rounds")
rounds = std::stoi(argv[i+1]);
}
if(input_file == NULL)
{
std::cout << "input file is needed\n";
std::cout<< "./test -file <file> -r <runtime queue per pe=1> -iter <min iteration for queue=2500> -source <source node to start=0> \
-q <number of queues used=1> -v <verbose=true> -ratio <ratio=1> \
-partition <partition 0=vertex partition, 1=edge partition, 2=random partition, 3=metis> -d <start device=0>\n";
exit(0);
}
//-------------------- initialize nvshmem environment ------------/
int n_pes, my_pe, group_id, group_size, local_id, local_size;
nvshm_mpi_init(my_pe, n_pes, group_id, group_size, local_id, local_size, &argc, &argv);
hipDeviceProp_t prop;
int dev_count;
CUDA_CHECK(hipGetDeviceCount(&dev_count));
CUDA_CHECK(hipGetDeviceProperties(&prop, my_pe%dev_count));
if(verbose) {
if(my_pe == 0) std::cout << "graph " << input_file << " partition scheme "<< partition_idx<< " iteration "<< min_iter << " source "<< source <<
" num worklist "<< num_queue << " rounds "<< rounds <<
" FETCH SIZE "<< FETCHSIZE << " PADDING SIZE " << PADDING_SIZE <<
" INTER BATCH SIZE "<< INTERBATCHSIZE << " WAIT TIMES " << WAITTIMES <<
" iteration ratio " << ratio <<std::endl;
std::cout << "PE: "<< my_pe << " deviceCount " << dev_count << " set on device " << my_pe%dev_count<<" device name " << prop.name << std::endl;
}
CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice));
//----------------------------------------------------------------/
//-------------------- Read CSR and partition --------------------/
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".csr") {
csr.ReadFromBinary(input_file);
}
else {
std::cout << "Generate the csr binary file first\n";
exit(1);
}
if(my_pe == 0) csr.PrintCsr();
nvshmem_barrier_all();
Csr<int, int> my_csr;
int partition_scheme[n_pes+1];
int * new_labels_old;
int new_source = source;
if(partition_idx == 2) {
CUDA_CHECK(hipMallocManaged(&new_labels_old, sizeof(int)*csr.nodes));
new_source = partitioner::random(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source);
}
else if(partition_idx == 0)
partitioner::vertices(n_pes, my_pe, csr, my_csr, partition_scheme);
else if(partition_idx == 1)
partitioner::edges(n_pes, my_pe, csr, my_csr, partition_scheme);
else if(partition_idx == 3) {
CUDA_CHECK(hipMallocManaged(&new_labels_old, sizeof(int)*csr.nodes));
char file_metis[256];
std::string file_name = str_file.substr(0, str_file.length()-4);
sprintf(file_metis, "%s_%d_metis_mega.txt", file_name.c_str(), n_pes);
if(exists_file(file_metis) == false) {
std::cout << "didn't find file: "<< file_metis << std::endl;
new_source = partitioner::metis(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source, (my_pe == 0), file_metis);
}
else {
std::cout << "read metis file: "<< file_metis << std::endl;
new_source = partitioner::metis(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source, file_metis);
}
}
if(verbose) {
SERIALIZE_PRINT(my_pe, n_pes, my_csr.PrintCsr());
if(my_pe == 0) {
std::cout << "Partition table:\n";
for(int i=0; i<n_pes+1; i++)
std::cout << partition_scheme[i] << " ";
std::cout <<std::endl;
}
}
nvshmem_barrier_all();
//----------------------------------------------------------------/
//--------------------- initialize BFS ---------------------------/
if(!(new_source >= partition_scheme[my_pe] && new_source < partition_scheme[my_pe+1]))
min_iter = min_iter*ratio;
BFS<int, int, uint32_t, PADDING_SIZE>
bfs(my_csr, my_pe, n_pes, group_id, group_size, local_id, local_size, partition_scheme, num_queue, 4*my_csr.nodes, (1<<20), min_iter);
Atos::MAXCOUNT::Agent<BFSEntry<int>, uint32_t, INTERBATCHSIZE, PADDING_SIZE> agent(bfs.worklists);
if(verbose)
SERIALIZE_PRINT(my_pe, n_pes, bfs.print());
printf("PE %d, new_source %d\n", my_pe, new_source);
hipStream_t agent_stream;
hipStream_t app_stream;
CUDA_CHECK(hipStreamCreateWithFlags(&agent_stream, hipStreamNonBlocking));
CUDA_CHECK(hipStreamCreateWithFlags(&app_stream, hipStreamNonBlocking));
std::vector<float> times;
std::vector<uint32_t> workloads;
for(int round = 0; round < rounds; round++) {
bfs.reset();
agent.resetAgent();
CUDA_CHECK(hipDeviceSynchronize());
bfs.BFSInit(new_source, 158);
//SERIALIZE_PRINT(my_pe, n_pes, bfs.worklists.print());
nvshmem_barrier_all();
//----------------------- warm up ---------------------------------/
warmup_bfs(bfs);
CUDA_CHECK(hipDeviceSynchronize());
agent.launchAgent<WAITTIMES>(agent_stream, NULL);
nvshmem_barrier_all();
//------------------------- start BFS ------------------------------/
GpuTimer timer;
nvshmem_barrier_all();
timer.Start();
bfs.BFSStart_persistent<FETCHSIZE, BLOCK_SIZE>(158, BLOCK_SIZE, 0, app_stream);
CUDA_CHECK(hipStreamSynchronize(app_stream));
timer.Stop();
agent.stopAgent(app_stream);
CUDA_CHECK(hipDeviceSynchronize());
nvshmem_barrier_all();
float elapsed = timer.ElapsedMillis();
SERIALIZE_PRINT(my_pe, n_pes, printf("time %8.2f\n", elapsed));
uint32_t check_record[num_queue+n_pes-1];
for(int i=0; i<bfs.worklists.num_local_queues+n_pes-1; i++)
CUDA_CHECK(hipMemcpy(check_record+i, (uint32_t *)(bfs.worklists.end+PADDING_SIZE*i), sizeof(uint32_t), hipMemcpyDeviceToHost));
uint32_t totalworkload = sum(check_record, n_pes-1+num_queue);
if(verbose) {
SERIALIZE_PRINT(my_pe, n_pes, bfs.worklists.print());
}
times.push_back(elapsed);
workloads.push_back(totalworkload);
}
SERIALIZE_PRINT(my_pe, n_pes, printf("ave time: %8.2f\n", std::accumulate(times.begin(), times.end(), 0.0)/times.size()));
SERIALIZE_PRINT(my_pe, n_pes, printf("ave workload: %lld\n", std::accumulate(workloads.begin(), workloads.end(), (long long)(0))/workloads.size()));
//----------------------------------------------------------------/
nvshmem_barrier_all();
if(ifcheck)
for(int i=0; i<n_pes; i++) {
if(my_pe == i)
{
std::cout << "[PE "<< my_pe << "]\n";
host::BFSValid<int, int>(csr, bfs, source, partition_idx, new_labels_old);
}
nvshmem_barrier_all();
}
//----------------------------------------------------------------/
nvshmem_barrier_all();
SERIALIZE_PRINT(my_pe, n_pes,std::cout << "End program "<< my_pe << std::endl);
nvshmem_barrier_all();
csr.release();
my_csr.release();
nvshm_mpi_finalize();
return 0;
}
| a67fdcd6088baf21e107d487dc9f3228e7a6a004.cu | #include <iostream>
#include <string>
#include <assert.h>
#include <unistd.h>
#define MPI_SUPPORT
#include "nvshmem.h"
#include "nvshmemx.h"
#ifdef MPI_SUPPORT
#include "mpi.h"
#endif
#include "../util/util.cuh"
#include "../util/error_util.cuh"
#include "../util/nvshmem_util.cuh"
#include "../util/time.cuh"
#include "../comm/csr.cuh"
#include "../comm/partition.cuh"
#include "bfs.cuh"
#include "../comm/agent_maxcount.cuh"
#include "validation.cuh"
//#define INTER_BATCH_SIZE (8)
//#define WAIT_TIMES (4)
//#define FETCH_SIZE (32)
#define BLOCK_SIZE (512)
#define PADDING_SIZE (32)
uint32_t sum(uint32_t *array, int n) {
uint32_t total=0;
for(int i=0; i<n; i++)
total += array[i];
return total;
}
void print_recv(int my_pe, uint32_t *array, int n) {
printf("pe %d receive:", my_pe);
for(int i=0; i<n; i++)
printf(" %6d", array[i]);
printf("\n");
}
int main(int argc, char *argv[])
{
//---------------------Pass from command ---------------//
char *input_file = NULL;
int min_iter = -1;
int source = 0;
int option = 1;
int num_queue=1;
int rtq_pe = 1;
bool verbose = 0;
float ratio = 1;
int partition_idx = 0;
int device = 0;
char * metis_file=NULL;
bool ifcheck=false;
int rounds=1;
if(argc == 1)
{
std::cout<< "./test -file <file> -r <runtime queue per pe=1> -iter <min iteration for queue=2500> -source <source node to start=0> \
-q <number of queues used=1> -v <verbose=true> -ratio <ratio=1> \
-partition <partition 0=vertex partition, 1=edge partition, 2=random partition, 3=metis> -d <start device=0>\n";
exit(0);
}
if(argc > 1)
for(int i=1; i<argc; i++) {
if(std::string(argv[i]) == "-file")
input_file = argv[i+1];
else if(std::string(argv[i]) == "-source")
source = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-iter")
min_iter = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-o")
option = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-r")
rtq_pe = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-q")
num_queue= std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-v")
verbose= std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-ratio")
ratio = std::stof(argv[i+1]);
else if(std::string(argv[i]) == "-partition")
partition_idx = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-d")
device = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-fmetis")
metis_file = argv[i+1];
else if(std::string(argv[i]) == "-check")
ifcheck = std::stoi(argv[i+1]);
else if(std::string(argv[i]) == "-rounds")
rounds = std::stoi(argv[i+1]);
}
if(input_file == NULL)
{
std::cout << "input file is needed\n";
std::cout<< "./test -file <file> -r <runtime queue per pe=1> -iter <min iteration for queue=2500> -source <source node to start=0> \
-q <number of queues used=1> -v <verbose=true> -ratio <ratio=1> \
-partition <partition 0=vertex partition, 1=edge partition, 2=random partition, 3=metis> -d <start device=0>\n";
exit(0);
}
//-------------------- initialize nvshmem environment ------------/
int n_pes, my_pe, group_id, group_size, local_id, local_size;
nvshm_mpi_init(my_pe, n_pes, group_id, group_size, local_id, local_size, &argc, &argv);
cudaDeviceProp prop;
int dev_count;
CUDA_CHECK(cudaGetDeviceCount(&dev_count));
CUDA_CHECK(cudaGetDeviceProperties(&prop, my_pe%dev_count));
if(verbose) {
if(my_pe == 0) std::cout << "graph " << input_file << " partition scheme "<< partition_idx<< " iteration "<< min_iter << " source "<< source <<
" num worklist "<< num_queue << " rounds "<< rounds <<
" FETCH SIZE "<< FETCHSIZE << " PADDING SIZE " << PADDING_SIZE <<
" INTER BATCH SIZE "<< INTERBATCHSIZE << " WAIT TIMES " << WAITTIMES <<
" iteration ratio " << ratio <<std::endl;
std::cout << "PE: "<< my_pe << " deviceCount " << dev_count << " set on device " << my_pe%dev_count<<" device name " << prop.name << std::endl;
}
CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice));
//----------------------------------------------------------------/
//-------------------- Read CSR and partition --------------------/
std::string str_file(input_file);
Csr<int, int> csr;
if(str_file.substr(str_file.length()-4) == ".csr") {
csr.ReadFromBinary(input_file);
}
else {
std::cout << "Generate the csr binary file first\n";
exit(1);
}
if(my_pe == 0) csr.PrintCsr();
nvshmem_barrier_all();
Csr<int, int> my_csr;
int partition_scheme[n_pes+1];
int * new_labels_old;
int new_source = source;
if(partition_idx == 2) {
CUDA_CHECK(cudaMallocManaged(&new_labels_old, sizeof(int)*csr.nodes));
new_source = partitioner::random(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source);
}
else if(partition_idx == 0)
partitioner::vertices(n_pes, my_pe, csr, my_csr, partition_scheme);
else if(partition_idx == 1)
partitioner::edges(n_pes, my_pe, csr, my_csr, partition_scheme);
else if(partition_idx == 3) {
CUDA_CHECK(cudaMallocManaged(&new_labels_old, sizeof(int)*csr.nodes));
char file_metis[256];
std::string file_name = str_file.substr(0, str_file.length()-4);
sprintf(file_metis, "%s_%d_metis_mega.txt", file_name.c_str(), n_pes);
if(exists_file(file_metis) == false) {
std::cout << "didn't find file: "<< file_metis << std::endl;
new_source = partitioner::metis(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source, (my_pe == 0), file_metis);
}
else {
std::cout << "read metis file: "<< file_metis << std::endl;
new_source = partitioner::metis(n_pes, my_pe, csr, my_csr, new_labels_old, partition_scheme, source, file_metis);
}
}
if(verbose) {
SERIALIZE_PRINT(my_pe, n_pes, my_csr.PrintCsr());
if(my_pe == 0) {
std::cout << "Partition table:\n";
for(int i=0; i<n_pes+1; i++)
std::cout << partition_scheme[i] << " ";
std::cout <<std::endl;
}
}
nvshmem_barrier_all();
//----------------------------------------------------------------/
//--------------------- initialize BFS ---------------------------/
if(!(new_source >= partition_scheme[my_pe] && new_source < partition_scheme[my_pe+1]))
min_iter = min_iter*ratio;
BFS<int, int, uint32_t, PADDING_SIZE>
bfs(my_csr, my_pe, n_pes, group_id, group_size, local_id, local_size, partition_scheme, num_queue, 4*my_csr.nodes, (1<<20), min_iter);
Atos::MAXCOUNT::Agent<BFSEntry<int>, uint32_t, INTERBATCHSIZE, PADDING_SIZE> agent(bfs.worklists);
if(verbose)
SERIALIZE_PRINT(my_pe, n_pes, bfs.print());
printf("PE %d, new_source %d\n", my_pe, new_source);
cudaStream_t agent_stream;
cudaStream_t app_stream;
CUDA_CHECK(cudaStreamCreateWithFlags(&agent_stream, cudaStreamNonBlocking));
CUDA_CHECK(cudaStreamCreateWithFlags(&app_stream, cudaStreamNonBlocking));
std::vector<float> times;
std::vector<uint32_t> workloads;
for(int round = 0; round < rounds; round++) {
bfs.reset();
agent.resetAgent();
CUDA_CHECK(cudaDeviceSynchronize());
bfs.BFSInit(new_source, 158);
//SERIALIZE_PRINT(my_pe, n_pes, bfs.worklists.print());
nvshmem_barrier_all();
//----------------------- warm up ---------------------------------/
warmup_bfs(bfs);
CUDA_CHECK(cudaDeviceSynchronize());
agent.launchAgent<WAITTIMES>(agent_stream, NULL);
nvshmem_barrier_all();
//------------------------- start BFS ------------------------------/
GpuTimer timer;
nvshmem_barrier_all();
timer.Start();
bfs.BFSStart_persistent<FETCHSIZE, BLOCK_SIZE>(158, BLOCK_SIZE, 0, app_stream);
CUDA_CHECK(cudaStreamSynchronize(app_stream));
timer.Stop();
agent.stopAgent(app_stream);
CUDA_CHECK(cudaDeviceSynchronize());
nvshmem_barrier_all();
float elapsed = timer.ElapsedMillis();
SERIALIZE_PRINT(my_pe, n_pes, printf("time %8.2f\n", elapsed));
uint32_t check_record[num_queue+n_pes-1];
for(int i=0; i<bfs.worklists.num_local_queues+n_pes-1; i++)
CUDA_CHECK(cudaMemcpy(check_record+i, (uint32_t *)(bfs.worklists.end+PADDING_SIZE*i), sizeof(uint32_t), cudaMemcpyDeviceToHost));
uint32_t totalworkload = sum(check_record, n_pes-1+num_queue);
if(verbose) {
SERIALIZE_PRINT(my_pe, n_pes, bfs.worklists.print());
}
times.push_back(elapsed);
workloads.push_back(totalworkload);
}
SERIALIZE_PRINT(my_pe, n_pes, printf("ave time: %8.2f\n", std::accumulate(times.begin(), times.end(), 0.0)/times.size()));
SERIALIZE_PRINT(my_pe, n_pes, printf("ave workload: %lld\n", std::accumulate(workloads.begin(), workloads.end(), (long long)(0))/workloads.size()));
//----------------------------------------------------------------/
nvshmem_barrier_all();
if(ifcheck)
for(int i=0; i<n_pes; i++) {
if(my_pe == i)
{
std::cout << "[PE "<< my_pe << "]\n";
host::BFSValid<int, int>(csr, bfs, source, partition_idx, new_labels_old);
}
nvshmem_barrier_all();
}
//----------------------------------------------------------------/
nvshmem_barrier_all();
SERIALIZE_PRINT(my_pe, n_pes,std::cout << "End program "<< my_pe << std::endl);
nvshmem_barrier_all();
csr.release();
my_csr.release();
nvshm_mpi_finalize();
return 0;
}
|
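The validation loop near the end of main above prints per-PE results by looping over PEs with a barrier between iterations. A stripped-down sketch of that pattern follows; it is illustrative, assumes NVSHMEM has already been initialized, and the function name is made up.

#include <cstdio>
#include "nvshmem.h"

// Print one line per PE in PE order; every PE must call this collectively.
void SerializedPrint(const char* msg) {
    const int me = nvshmem_my_pe();
    const int n  = nvshmem_n_pes();
    for (int pe = 0; pe < n; ++pe) {
        if (pe == me) printf("[PE %d] %s\n", me, msg);
        fflush(stdout);
        nvshmem_barrier_all();  // keep PEs in lockstep so output stays roughly ordered
    }
}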
cde24d402d6a9ba1f07566b5a431d24b14149a5d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2018~2023 by XGBoost contributors
*/
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <xgboost/logging.h>
#include <cstddef> // for size_t
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "categorical.h"
#include "device_helpers_hip.cuh"
#include "hist_util_hip.cuh"
#include "hist_util.h"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return ::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = ::min(nnz, if_dense);
return result;
}
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = ::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate column size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = ::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = ::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: ::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += ::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = ::max(peak, total);
return peak;
}
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, bst_feature_t columns,
size_t nnz, int device,
size_t num_cuts, bool has_weight) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
// device available memory is not accurate when rmm is used.
return nnz;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
auto avail = dh::AvailableMemory(device) * 0.8;
if (required_memory > avail) {
sketch_batch_num_elements = avail / BytesPerElement(has_weight);
} else {
sketch_batch_num_elements = ::min(num_rows * static_cast<size_t>(columns), nnz);
}
}
return sketch_batch_num_elements;
}
void SortByWeight(dh::device_vector<float>* weights,
dh::device_vector<Entry>* sorted_entries) {
// Sort both entries and weights.
dh::XGBDeviceAllocator<char> alloc;
thrust::sort_by_key(thrust::hip::par(alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights
dh::XGBCachingDeviceAllocator<char> caching;
thrust::inclusive_scan_by_key(thrust::hip::par(caching),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
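// Minimal sketch of the sort + segmented-scan pattern used above on plain thrust
// vectors (illustrative only; the names below are not part of this file, and the
// toy example uses integer keys with the default equality predicate instead of
// Entry/EntryCompareOp). Needs <thrust/device_vector.h>, <thrust/sort.h>, <thrust/scan.h>.
#if 0
void SortScanExample() {
int h_keys[] = {2, 0, 1, 0, 2, 1};
float h_weights[] = {1.f, 1.f, 2.f, 3.f, 1.f, 1.f};
thrust::device_vector<int> keys(h_keys, h_keys + 6);
thrust::device_vector<float> weights(h_weights, h_weights + 6);
// sort weights by key so that entries of the same column become adjacent
thrust::sort_by_key(keys.begin(), keys.end(), weights.begin());
// prefix-sum the weights inside each run of equal keys (in place)
thrust::inclusive_scan_by_key(keys.begin(), keys.end(), weights.begin(), weights.begin());
}
#endif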
void RemoveDuplicatedCategories(int32_t device, MetaInfo const& info, Span<bst_row_t> d_cuts_ptr,
dh::device_vector<Entry>* p_sorted_entries,
dh::device_vector<float>* p_sorted_weights,
dh::caching_device_vector<size_t>* p_column_sizes_scan) {
info.feature_types.SetDevice(device);
auto d_feature_types = info.feature_types.ConstDeviceSpan();
CHECK(!d_feature_types.empty());
auto& column_sizes_scan = *p_column_sizes_scan;
auto& sorted_entries = *p_sorted_entries;
// Removing duplicated entries in categorical features.
// We don't need to accumulate weight for duplicated entries as there's no weighted
// sketching for categorical features, the categories are the cut values.
dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
std::size_t n_uniques{0};
if (p_sorted_weights) {
using Pair = thrust::tuple<Entry, float>;
auto d_sorted_entries = dh::ToSpan(sorted_entries);
auto d_sorted_weights = dh::ToSpan(*p_sorted_weights);
auto val_in_it = thrust::make_zip_iterator(d_sorted_entries.data(), d_sorted_weights.data());
auto val_out_it = thrust::make_zip_iterator(d_sorted_entries.data(), d_sorted_weights.data());
n_uniques = dh::SegmentedUnique(
column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(),
val_in_it, val_in_it + sorted_entries.size(), new_column_scan.data().get(), val_out_it,
[=] __device__(Pair const& l, Pair const& r) {
Entry const& le = thrust::get<0>(l);
Entry const& re = thrust::get<0>(r);
if (le.index == re.index && IsCat(d_feature_types, le.index)) {
return le.fvalue == re.fvalue;
}
return false;
});
p_sorted_weights->resize(n_uniques);
} else {
n_uniques = dh::SegmentedUnique(
column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(),
sorted_entries.begin(), sorted_entries.end(), new_column_scan.data().get(),
sorted_entries.begin(), [=] __device__(Entry const& l, Entry const& r) {
if (l.index == r.index) {
if (IsCat(d_feature_types, l.index)) {
return l.fvalue == r.fvalue;
}
}
return false;
});
}
sorted_entries.resize(n_uniques);
// Renew the column scan and cut scan based on categorical data.
auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(info.num_col_ + 1);
CHECK_EQ(new_column_scan.size(), new_cuts_size.size());
dh::LaunchN(new_column_scan.size(),
[=, d_new_cuts_size = dh::ToSpan(new_cuts_size),
d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan),
d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) {
d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
if (idx == d_new_columns_ptr.size() - 1) {
return;
}
if (IsCat(d_feature_types, idx)) {
// Cut size is the same as number of categories in input.
d_new_cuts_size[idx] = d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
} else {
d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx];
}
});
// Turn size into ptr.
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(), new_cuts_size.cend(),
d_cuts_ptr.data());
}
} // namespace detail
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
size_t begin, size_t end, SketchContainer *sketch_container,
int num_cuts_per_feature, size_t num_columns) {
dh::XGBCachingDeviceAllocator<char> alloc;
dh::device_vector<Entry> sorted_entries;
if (page.data.DeviceCanRead()) {
const auto& device_data = page.data.ConstDevicePointer();
sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end);
} else {
const auto& host_data = page.data.ConstHostVector();
sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin,
host_data.begin() + end);
}
thrust::sort(thrust::hip::par(alloc), sorted_entries.begin(),
sorted_entries.end(), detail::EntryCompareOp());
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
IterSpan{batch_it, sorted_entries.size()}, dummy_is_valid, &cuts_ptr,
&column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, nullptr,
&column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
// add cuts into sketches
sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
d_cuts_ptr, h_cuts_ptr.back());
sorted_entries.clear();
sorted_entries.shrink_to_fit();
CHECK_EQ(sorted_entries.capacity(), 0);
CHECK_NE(cuts_ptr.Size(), 0);
}
void ProcessWeightedBatch(int device, const SparsePage& page,
MetaInfo const& info, size_t begin, size_t end,
SketchContainer* sketch_container, int num_cuts_per_feature,
size_t num_columns,
bool is_ranking, Span<bst_group_t const> d_group_ptr) {
auto weights = info.weights_.ConstDeviceSpan();
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
// Binary search to assign weights to each element
dh::device_vector<float> temp_weights(sorted_entries.size());
auto d_temp_weights = temp_weights.data().get();
page.offset.SetDevice(device);
auto row_ptrs = page.offset.ConstDeviceSpan();
size_t base_rowid = page.base_rowid;
if (is_ranking) {
CHECK_GE(d_group_ptr.size(), 2)
<< "Must have at least 1 group for ranking.";
CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
<< "Weight size should equal to number of groups.";
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
d_temp_weights[idx] = weights[group_idx];
});
} else {
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
d_temp_weights[idx] = weights[ridx + base_rowid];
});
}
detail::SortByWeight(&temp_weights, &sorted_entries);
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
IterSpan{batch_it, sorted_entries.size()}, dummy_is_valid, &cuts_ptr,
&column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &temp_weights,
&column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
// Extract cuts
sketch_container->Push(dh::ToSpan(sorted_entries),
dh::ToSpan(column_sizes_scan), d_cuts_ptr,
h_cuts_ptr.back(), dh::ToSpan(temp_weights));
sorted_entries.clear();
sorted_entries.shrink_to_fit();
}
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t sketch_batch_num_elements) {
dmat->Info().feature_types.SetDevice(device);
dmat->Info().feature_types.ConstDevicePointer(); // pull to device early
// Configure batch size based on available memory
bool has_weights = dmat->Info().weights_.Size() > 0;
size_t num_cuts_per_feature =
detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
sketch_batch_num_elements = detail::SketchBatchNumElements(
sketch_batch_num_elements,
dmat->Info().num_row_,
dmat->Info().num_col_,
dmat->Info().num_nonzero_,
device, num_cuts_per_feature, has_weights);
HistogramCuts cuts;
SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
dmat->Info().num_row_, device);
dmat->Info().weights_.SetDevice(device);
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = ::min(batch_nnz, static_cast<std::size_t>(begin + sketch_batch_num_elements));
if (has_weights) {
bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
info.group_ptr_.cend());
ProcessWeightedBatch(
device, batch, dmat->Info(), begin, end,
&sketch_container,
num_cuts_per_feature,
dmat->Info().num_col_,
is_ranking, dh::ToSpan(groups));
} else {
ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
num_cuts_per_feature, dmat->Info().num_col_);
}
}
}
sketch_container.MakeCuts(&cuts);
return cuts;
}
} // namespace common
} // namespace xgboost
| cde24d402d6a9ba1f07566b5a431d24b14149a5d.cu | /**
* Copyright 2018~2023 by XGBoost contributors
*/
#include <thrust/binary_search.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <xgboost/logging.h>
#include <cstddef> // for size_t
#include <memory>
#include <mutex>
#include <utility>
#include <vector>
#include "categorical.h"
#include "device_helpers.cuh"
#include "hist_util.cuh"
#include "hist_util.h"
#include "math.h" // NOLINT
#include "quantile.h"
#include "xgboost/host_device_vector.h"
namespace xgboost {
namespace common {
constexpr float SketchContainer::kFactor;
namespace detail {
size_t RequiredSampleCutsPerColumn(int max_bins, size_t num_rows) {
double eps = 1.0 / (WQSketch::kFactor * max_bins);
size_t dummy_nlevel;
size_t num_cuts;
WQuantileSketch<bst_float, bst_float>::LimitSizeLevel(
num_rows, eps, &dummy_nlevel, &num_cuts);
return std::min(num_cuts, num_rows);
}
size_t RequiredSampleCuts(bst_row_t num_rows, bst_feature_t num_columns,
size_t max_bins, size_t nnz) {
auto per_column = RequiredSampleCutsPerColumn(max_bins, num_rows);
auto if_dense = num_columns * per_column;
auto result = std::min(nnz, if_dense);
return result;
}
size_t RequiredMemory(bst_row_t num_rows, bst_feature_t num_columns, size_t nnz,
size_t num_bins, bool with_weights) {
size_t peak = 0;
// 0. Allocate cut pointer in quantile container by increasing: n_columns + 1
size_t total = (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 1. Copy and sort: 2 * bytes_per_element * shape
total += BytesPerElement(with_weights) * num_rows * num_columns;
peak = std::max(peak, total);
// 2. Deallocate bytes_per_element * shape due to reusing memory in sort.
total -= BytesPerElement(with_weights) * num_rows * num_columns / 2;
// 3. Allocate column size scan by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 4. Allocate cut pointer by increasing: n_columns + 1
total += (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 5. Allocate cuts: assuming rows is greater than bins: n_columns * limit_size
total += RequiredSampleCuts(num_rows, num_bins, num_bins, nnz) * sizeof(SketchEntry);
// 6. Deallocate copied entries by reducing: bytes_per_element * shape.
peak = std::max(peak, total);
total -= (BytesPerElement(with_weights) * num_rows * num_columns) / 2;
// 7. Deallocate column size scan.
peak = std::max(peak, total);
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 8. Deallocate cut size scan.
total -= (num_columns + 1) * sizeof(SketchContainer::OffsetT);
// 9. Allocate final cut values, min values, cut ptrs: std::min(rows, bins + 1) *
// n_columns + n_columns + n_columns + 1
total += std::min(num_rows, num_bins) * num_columns * sizeof(float);
total += num_columns *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().MinValues())>::value_type);
total += (num_columns + 1) *
sizeof(std::remove_reference_t<decltype(
std::declval<HistogramCuts>().Ptrs())>::value_type);
peak = std::max(peak, total);
return peak;
}
size_t SketchBatchNumElements(size_t sketch_batch_num_elements,
bst_row_t num_rows, bst_feature_t columns,
size_t nnz, int device,
size_t num_cuts, bool has_weight) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
// device available memory is not accurate when rmm is used.
return nnz;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
if (sketch_batch_num_elements == 0) {
auto required_memory = RequiredMemory(num_rows, columns, nnz, num_cuts, has_weight);
// use up to 80% of available space
auto avail = dh::AvailableMemory(device) * 0.8;
if (required_memory > avail) {
sketch_batch_num_elements = avail / BytesPerElement(has_weight);
} else {
sketch_batch_num_elements = std::min(num_rows * static_cast<size_t>(columns), nnz);
}
}
return sketch_batch_num_elements;
}
void SortByWeight(dh::device_vector<float>* weights,
dh::device_vector<Entry>* sorted_entries) {
// Sort both entries and weights.
dh::XGBDeviceAllocator<char> alloc;
thrust::sort_by_key(thrust::cuda::par(alloc), sorted_entries->begin(),
sorted_entries->end(), weights->begin(),
detail::EntryCompareOp());
// Scan weights
dh::XGBCachingDeviceAllocator<char> caching;
thrust::inclusive_scan_by_key(thrust::cuda::par(caching),
sorted_entries->begin(), sorted_entries->end(),
weights->begin(), weights->begin(),
[=] __device__(const Entry& a, const Entry& b) {
return a.index == b.index;
});
}
void RemoveDuplicatedCategories(int32_t device, MetaInfo const& info, Span<bst_row_t> d_cuts_ptr,
dh::device_vector<Entry>* p_sorted_entries,
dh::device_vector<float>* p_sorted_weights,
dh::caching_device_vector<size_t>* p_column_sizes_scan) {
info.feature_types.SetDevice(device);
auto d_feature_types = info.feature_types.ConstDeviceSpan();
CHECK(!d_feature_types.empty());
auto& column_sizes_scan = *p_column_sizes_scan;
auto& sorted_entries = *p_sorted_entries;
// Removing duplicated entries in categorical features.
// We don't need to accumulate weight for duplicated entries as there's no weighted
// sketching for categorical features, the categories are the cut values.
dh::caching_device_vector<size_t> new_column_scan(column_sizes_scan.size());
std::size_t n_uniques{0};
if (p_sorted_weights) {
using Pair = thrust::tuple<Entry, float>;
auto d_sorted_entries = dh::ToSpan(sorted_entries);
auto d_sorted_weights = dh::ToSpan(*p_sorted_weights);
auto val_in_it = thrust::make_zip_iterator(d_sorted_entries.data(), d_sorted_weights.data());
auto val_out_it = thrust::make_zip_iterator(d_sorted_entries.data(), d_sorted_weights.data());
n_uniques = dh::SegmentedUnique(
column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(),
val_in_it, val_in_it + sorted_entries.size(), new_column_scan.data().get(), val_out_it,
[=] __device__(Pair const& l, Pair const& r) {
Entry const& le = thrust::get<0>(l);
Entry const& re = thrust::get<0>(r);
if (le.index == re.index && IsCat(d_feature_types, le.index)) {
return le.fvalue == re.fvalue;
}
return false;
});
p_sorted_weights->resize(n_uniques);
} else {
n_uniques = dh::SegmentedUnique(
column_sizes_scan.data().get(), column_sizes_scan.data().get() + column_sizes_scan.size(),
sorted_entries.begin(), sorted_entries.end(), new_column_scan.data().get(),
sorted_entries.begin(), [=] __device__(Entry const& l, Entry const& r) {
if (l.index == r.index) {
if (IsCat(d_feature_types, l.index)) {
return l.fvalue == r.fvalue;
}
}
return false;
});
}
sorted_entries.resize(n_uniques);
// Renew the column scan and cut scan based on categorical data.
auto d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan);
dh::caching_device_vector<SketchContainer::OffsetT> new_cuts_size(info.num_col_ + 1);
CHECK_EQ(new_column_scan.size(), new_cuts_size.size());
dh::LaunchN(new_column_scan.size(),
[=, d_new_cuts_size = dh::ToSpan(new_cuts_size),
d_old_column_sizes_scan = dh::ToSpan(column_sizes_scan),
d_new_columns_ptr = dh::ToSpan(new_column_scan)] __device__(size_t idx) {
d_old_column_sizes_scan[idx] = d_new_columns_ptr[idx];
if (idx == d_new_columns_ptr.size() - 1) {
return;
}
if (IsCat(d_feature_types, idx)) {
// Cut size is the same as number of categories in input.
d_new_cuts_size[idx] = d_new_columns_ptr[idx + 1] - d_new_columns_ptr[idx];
} else {
d_new_cuts_size[idx] = d_cuts_ptr[idx + 1] - d_cuts_ptr[idx];
}
});
// Turn size into ptr.
thrust::exclusive_scan(thrust::device, new_cuts_size.cbegin(), new_cuts_size.cend(),
d_cuts_ptr.data());
}
} // namespace detail
void ProcessBatch(int device, MetaInfo const &info, const SparsePage &page,
size_t begin, size_t end, SketchContainer *sketch_container,
int num_cuts_per_feature, size_t num_columns) {
dh::XGBCachingDeviceAllocator<char> alloc;
dh::device_vector<Entry> sorted_entries;
if (page.data.DeviceCanRead()) {
const auto& device_data = page.data.ConstDevicePointer();
sorted_entries = dh::device_vector<Entry>(device_data + begin, device_data + end);
} else {
const auto& host_data = page.data.ConstHostVector();
sorted_entries = dh::device_vector<Entry>(host_data.begin() + begin,
host_data.begin() + end);
}
thrust::sort(thrust::cuda::par(alloc), sorted_entries.begin(),
sorted_entries.end(), detail::EntryCompareOp());
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
IterSpan{batch_it, sorted_entries.size()}, dummy_is_valid, &cuts_ptr,
&column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, nullptr,
&column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
CHECK_EQ(d_cuts_ptr.size(), column_sizes_scan.size());
// add cuts into sketches
sketch_container->Push(dh::ToSpan(sorted_entries), dh::ToSpan(column_sizes_scan),
d_cuts_ptr, h_cuts_ptr.back());
sorted_entries.clear();
sorted_entries.shrink_to_fit();
CHECK_EQ(sorted_entries.capacity(), 0);
CHECK_NE(cuts_ptr.Size(), 0);
}
void ProcessWeightedBatch(int device, const SparsePage& page,
MetaInfo const& info, size_t begin, size_t end,
SketchContainer* sketch_container, int num_cuts_per_feature,
size_t num_columns,
bool is_ranking, Span<bst_group_t const> d_group_ptr) {
auto weights = info.weights_.ConstDeviceSpan();
dh::XGBCachingDeviceAllocator<char> alloc;
const auto& host_data = page.data.ConstHostVector();
dh::device_vector<Entry> sorted_entries(host_data.begin() + begin,
host_data.begin() + end);
// Binary search to assign weights to each element
dh::device_vector<float> temp_weights(sorted_entries.size());
auto d_temp_weights = temp_weights.data().get();
page.offset.SetDevice(device);
auto row_ptrs = page.offset.ConstDeviceSpan();
size_t base_rowid = page.base_rowid;
if (is_ranking) {
CHECK_GE(d_group_ptr.size(), 2)
<< "Must have at least 1 group for ranking.";
CHECK_EQ(weights.size(), d_group_ptr.size() - 1)
<< "Weight size should equal to number of groups.";
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
bst_group_t group_idx = dh::SegmentId(d_group_ptr, ridx + base_rowid);
d_temp_weights[idx] = weights[group_idx];
});
} else {
dh::LaunchN(temp_weights.size(), [=] __device__(size_t idx) {
size_t element_idx = idx + begin;
size_t ridx = dh::SegmentId(row_ptrs, element_idx);
d_temp_weights[idx] = weights[ridx + base_rowid];
});
}
detail::SortByWeight(&temp_weights, &sorted_entries);
HostDeviceVector<SketchContainer::OffsetT> cuts_ptr;
dh::caching_device_vector<size_t> column_sizes_scan;
data::IsValidFunctor dummy_is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_it = dh::MakeTransformIterator<data::COOTuple>(
sorted_entries.data().get(),
[] __device__(Entry const &e) -> data::COOTuple {
return {0, e.index, e.fvalue}; // row_idx is not needed for scanning column size.
});
detail::GetColumnSizesScan(device, num_columns, num_cuts_per_feature,
IterSpan{batch_it, sorted_entries.size()}, dummy_is_valid, &cuts_ptr,
&column_sizes_scan);
auto d_cuts_ptr = cuts_ptr.DeviceSpan();
if (sketch_container->HasCategorical()) {
detail::RemoveDuplicatedCategories(device, info, d_cuts_ptr, &sorted_entries, &temp_weights,
&column_sizes_scan);
}
auto const& h_cuts_ptr = cuts_ptr.ConstHostVector();
// Extract cuts
sketch_container->Push(dh::ToSpan(sorted_entries),
dh::ToSpan(column_sizes_scan), d_cuts_ptr,
h_cuts_ptr.back(), dh::ToSpan(temp_weights));
sorted_entries.clear();
sorted_entries.shrink_to_fit();
}
HistogramCuts DeviceSketch(int device, DMatrix* dmat, int max_bins,
size_t sketch_batch_num_elements) {
dmat->Info().feature_types.SetDevice(device);
dmat->Info().feature_types.ConstDevicePointer(); // pull to device early
// Configure batch size based on available memory
bool has_weights = dmat->Info().weights_.Size() > 0;
size_t num_cuts_per_feature =
detail::RequiredSampleCutsPerColumn(max_bins, dmat->Info().num_row_);
sketch_batch_num_elements = detail::SketchBatchNumElements(
sketch_batch_num_elements,
dmat->Info().num_row_,
dmat->Info().num_col_,
dmat->Info().num_nonzero_,
device, num_cuts_per_feature, has_weights);
HistogramCuts cuts;
SketchContainer sketch_container(dmat->Info().feature_types, max_bins, dmat->Info().num_col_,
dmat->Info().num_row_, device);
dmat->Info().weights_.SetDevice(device);
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
size_t batch_nnz = batch.data.Size();
auto const& info = dmat->Info();
for (auto begin = 0ull; begin < batch_nnz; begin += sketch_batch_num_elements) {
size_t end = std::min(batch_nnz, static_cast<std::size_t>(begin + sketch_batch_num_elements));
if (has_weights) {
bool is_ranking = HostSketchContainer::UseGroup(dmat->Info());
dh::caching_device_vector<uint32_t> groups(info.group_ptr_.cbegin(),
info.group_ptr_.cend());
ProcessWeightedBatch(
device, batch, dmat->Info(), begin, end,
&sketch_container,
num_cuts_per_feature,
dmat->Info().num_col_,
is_ranking, dh::ToSpan(groups));
} else {
ProcessBatch(device, dmat->Info(), batch, begin, end, &sketch_container,
num_cuts_per_feature, dmat->Info().num_col_);
}
}
}
sketch_container.MakeCuts(&cuts);
return cuts;
}
} // namespace common
} // namespace xgboost
|
f5782001ac1af1b03d36589b0f903d6edc7883b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softplus_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SoftplusKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = log(exp(X[i]) + 1.0f);
}
}
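// Gradient note: with Y = log(1 + exp(X)) we have dY/dX = exp(X) / (1 + exp(X)) = 1 - exp(-Y),
// which is why the backward kernel below only needs Y and dY, not X.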
template <typename T>
__global__ void
SoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float nexpY = exp(-Y[i]);
dX[i] = dY[i] * (1 - nexpY);
}
}
} // namespace
template <>
bool SoftplusOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
DCHECK_GT(X.size(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SoftplusKernel<float>)
, dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(), X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool SoftplusGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
hipLaunchKernelGGL(( SoftplusGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(Y.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Softplus, SoftplusOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SoftplusGradient,
SoftplusGradientOp<float, CUDAContext>);
} // namespace caffe2
| f5782001ac1af1b03d36589b0f903d6edc7883b2.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/softplus_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SoftplusKernel(const int N, const T* X, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = log(exp(X[i]) + 1.0f);
}
}
template <typename T>
__global__ void
SoftplusGradientKernel(const int N, const T* Y, const T* dY, T* dX) {
CUDA_1D_KERNEL_LOOP(i, N) {
const float nexpY = exp(-Y[i]);
dX[i] = dY[i] * (1 - nexpY);
}
}
} // namespace
template <>
bool SoftplusOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
DCHECK_GT(X.size(), 0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
SoftplusKernel<float>
<<<CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(), X.data<float>(), Y->template mutable_data<float>());
return true;
}
template <>
bool SoftplusGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
DCHECK_GT(Y.size(), 0);
DCHECK_EQ(dY.size(), Y.size());
auto* dX = Output(0, Y.sizes(), at::dtype<float>());
SoftplusGradientKernel<float>
<<<CAFFE_GET_BLOCKS(Y.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(Softplus, SoftplusOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SoftplusGradient,
SoftplusGradientOp<float, CUDAContext>);
} // namespace caffe2
|
838916850b8c63f757b259bbb285fd078bc97d38.hip | // !!! This is a file automatically generated by hipify!!!
#include <string.h>
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "sm4cuda.cuh"
// S-box table
uint8_t SboxTable[256] = { \
0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05, \
0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99, \
0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62, \
0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6, \
0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8, \
0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35, \
0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87, \
0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e, \
0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1, \
0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3, \
0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f, \
0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51, \
0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8, \
0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0, \
0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84, \
0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48, \
};
/* System parameter */
uint32_t FK[4] = { 0xa3b1bac6,0x56aa3350,0x677d9197,0xb27022dc };
/* fixed parameter */
uint32_t CK[32] = { \
0x00070e15,0x1c232a31,0x383f464d,0x545b6269, \
0x70777e85,0x8c939aa1,0xa8afb6bd,0xc4cbd2d9, \
0xe0e7eef5,0xfc030a11,0x181f262d,0x343b4249, \
0x50575e65,0x6c737a81,0x888f969d,0xa4abb2b9, \
0xc0c7ced5,0xdce3eaf1,0xf8ff060d,0x141b2229, \
0x30373e45,0x4c535a61,0x686f767d,0x848b9299, \
0xa0a7aeb5,0xbcc3cad1,0xd8dfe6ed,0xf4fb0209, \
0x10171e25,0x2c333a41,0x484f565d,0x646b7279, \
};
/*
Read a 32-bit big-endian value from a byte array.
b: source byte array
i: starting offset in b
n: receives the assembled 32-bit value
*/
inline void GET_UINT_BE(uint32_t *n, uint8_t *b, uint32_t i)
{
(*n) = (((uint32_t)b[i]) << 24) | (((uint32_t)b[i + 1]) << 16) | (((uint32_t)b[i + 2]) << 8) | (uint32_t)b[i + 3];
}
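// Example: with b = {0x01, 0x23, 0x45, 0x67} and i = 0, n becomes 0x01234567.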
/*
Write a 32-bit value into a byte array in big-endian order.
b: destination byte array
i: starting offset in b
n: 32-bit value to store
*/
inline void PUT_UINT_BE(uint32_t n, uint8_t *b, uint32_t i)
{
// most significant byte of n
b[i + 0] = (uint8_t)(n >> 24);
// second byte of n
b[i + 1] = (uint8_t)(n >> 16);
// third byte of n
b[i + 2] = (uint8_t)(n >> 8);
// least significant byte of n
b[i + 3] = (uint8_t)n;
}
/*
S-box substitution for a single byte
*/
inline uint8_t sm4Sbox(uint8_t inch)
{
return SboxTable[inch];
}
/*
Rotate x left by n bits
*/
inline uint32_t ROTL(uint32_t x, uint32_t n)
{
return (x << n) | (x >> (32 - n));
}
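// Example: ROTL(0x80000001, 1) == 0x00000003 (the high bit wraps around to bit 0).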
/*
Swap the values pointed to by a and b
*/
inline void SWAP(uint32_t *a, uint32_t *b)
{
uint32_t c = *a;
*a = *b;
*b = c;
}
uint32_t sm4Lt(uint32_t ka)
{
uint8_t a[4];
PUT_UINT_BE(ka, a, 0);
// apply the S-box to each byte
a[0] = sm4Sbox(a[0]);
a[1] = sm4Sbox(a[1]);
a[2] = sm4Sbox(a[2]);
a[3] = sm4Sbox(a[3]);
// reassemble the bytes into a 32-bit word
uint32_t bb = 0;
GET_UINT_BE(&bb, a, 0);
// linear transform L: XOR bb with its rotations by 2, 10, 18 and 24 bits
return bb ^ (ROTL(bb, 2)) ^ (ROTL(bb, 10)) ^ (ROTL(bb, 18)) ^ (ROTL(bb, 24));
}
uint32_t sm4F(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t rk)
{
return (x0^sm4Lt(x1^x2^x3^rk));
}
/*
Key-schedule transform T': S-box substitution followed by the linear transform L'
*/
uint32_t sm4CalciRK(uint32_t ka)
{
uint8_t a[4];
PUT_UINT_BE(ka, a, 0);
a[0] = sm4Sbox(a[0]);
a[1] = sm4Sbox(a[1]);
a[2] = sm4Sbox(a[2]);
a[3] = sm4Sbox(a[3]);
uint32_t bb = 0;
GET_UINT_BE(&bb, a, 0);
return bb ^ (ROTL(bb, 13)) ^ (ROTL(bb, 23));
}
/*
Key expansion
SK: output array of 32 round keys
key: 128-bit master key
*/
void sm4_setkey(uint32_t SK[32], uint8_t key[16])
{
uint32_t MK[4];
GET_UINT_BE(&MK[0], key, 0);
GET_UINT_BE(&MK[1], key, 4);
GET_UINT_BE(&MK[2], key, 8);
GET_UINT_BE(&MK[3], key, 12);
//
uint32_t k[36];
k[0] = MK[0] ^ FK[0];
k[1] = MK[1] ^ FK[1];
k[2] = MK[2] ^ FK[2];
k[3] = MK[3] ^ FK[3];
for (int i = 0; i < 32; i++)
{
k[i + 4] = k[i] ^ (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
SK[i] = k[i + 4];
}
}
/*
Process one 16-byte block through the 32 SM4 rounds
*/
void sm4_one_round(uint32_t sk[32], uint8_t input[16], uint8_t output[16])
{
uint32_t ulbuf[36];
memset(ulbuf, 0, sizeof(ulbuf));
GET_UINT_BE(&ulbuf[0], input, 0);
GET_UINT_BE(&ulbuf[1], input, 4);
GET_UINT_BE(&ulbuf[2], input, 8);
GET_UINT_BE(&ulbuf[3], input, 12);
for (int i = 0; i < 32; i++)
{
ulbuf[i + 4] = sm4F(ulbuf[i], ulbuf[i + 1], ulbuf[i + 2], ulbuf[i + 3], sk[i]);
}
PUT_UINT_BE(ulbuf[35], output, 0);
PUT_UINT_BE(ulbuf[34], output, 4);
PUT_UINT_BE(ulbuf[33], output, 8);
PUT_UINT_BE(ulbuf[32], output, 12);
}
/*
Set up the context for encryption
ctx: SM4 context
key: 128-bit key
*/
void sm4_setkey_enc(sm4_context *ctx, uint8_t key[16])
{
ctx->mode = SM4_ENCRYPT;
sm4_setkey(ctx->sk, key);
}
/*
Set up the context for decryption (round keys are applied in reverse order)
ctx: SM4 context
key: 128-bit key
*/
void sm4_setkey_dec(sm4_context *ctx, uint8_t key[16])
{
ctx->mode = SM4_DECRYPT;
sm4_setkey(ctx->sk, key);
for (int i = 0; i < 16; i++)
{
SWAP(&(ctx->sk[i]), &(ctx->sk[31 - i]));
}
}
/*
 * SM4-ECB block encryption/decryption
 *
 * ctx: SM4 context (round keys and mode)
 * length: data length in bytes, a multiple of 16
 * input: input blocks (16 bytes each)
 * output: output blocks (16 bytes each)
 */
void sm4_crypt_ecb(sm4_context *ctx, int length, uint8_t *input, uint8_t *output)
{
while (length > 0)
{
sm4_one_round(ctx->sk, input, output);
input += 16;
output += 16;
length -= 16;
}
}
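/*
Illustrative usage of the ECB helpers above (not part of the original file; the key
and data values are arbitrary placeholders).
*/
#if 0
void sm4_ecb_example(void)
{
uint8_t key[16] = { 0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef,0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10 };
uint8_t plain[16] = { 0 };
uint8_t cipher[16];
uint8_t decrypted[16];
sm4_context ctx;
// expand the key and encrypt one 16-byte block
sm4_setkey_enc(&ctx, key);
sm4_crypt_ecb(&ctx, 16, plain, cipher);
// re-expand the key for decryption (round keys reversed) and recover the block
sm4_setkey_dec(&ctx, key);
sm4_crypt_ecb(&ctx, 16, cipher, decrypted);
}
#endif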
/*
Lookup tables mapping between the linear block layout and the per-warp matrix layout
*/
// tables for word 0
uint32_t matrix_table_zero[32] = {
0 * 4 + 0 * 128, 1 * 4 + 0 * 128, 2 * 4 + 0 * 128, 3 * 4 + 0 * 128, \
4 * 4 + 0 * 128, 5 * 4 + 0 * 128, 6 * 4 + 0 * 128, 7 * 4 + 0 * 128, \
8 * 4 + 1 * 128, 9 * 4 + 1 * 128, 10 * 4 + 1 * 128, 11 * 4 + 1 * 128, \
12 * 4 + 1 * 128, 13 * 4 + 1 * 128, 14 * 4 + 1 * 128, 15 * 4 + 1 * 128, \
16 * 4 + 2 * 128, 17 * 4 + 2 * 128, 18 * 4 + 2 * 128, 19 * 4 + 2 * 128, \
20 * 4 + 2 * 128, 21 * 4 + 2 * 128, 22 * 4 + 2 * 128, 23 * 4 + 2 * 128, \
24 * 4 + 3 * 128, 25 * 4 + 3 * 128, 26 * 4 + 3 * 128, 27 * 4 + 3 * 128, \
28 * 4 + 3 * 128, 29 * 4 + 3 * 128, 30 * 4 + 3 * 128, 31 * 4 + 3 * 128, \
};
uint32_t linear_table_zero[32] = {
0 * 4 + 0 * 128, 4 * 4 + 0 * 128, 8 * 4 + 0 * 128, 12 * 4 + 0 * 128, \
16 * 4 + 0 * 128, 20 * 4 + 0 * 128, 24 * 4 + 0 * 128, 28 * 4 + 0 * 128,\
1 * 4 + 1 * 128, 5 * 4 + 1 * 128, 9 * 4 + 1 * 128, 13 * 4 + 1 * 128, \
17 * 4 + 1 * 128, 21 * 4 + 1 * 128, 25 * 4 + 1 * 128, 29 * 4 + 1 * 128, \
2 * 4 + 2 * 128, 6 * 4 + 2 * 128, 10 * 4 + 2 * 128, 14 * 4 + 2 * 128, \
18 * 4 + 2 * 128, 22 * 4 + 2 * 128, 26 * 4 + 2 * 128, 30 * 4 + 2 * 128, \
3 * 4 + 3 * 128, 7 * 4 + 3 * 128, 11 * 4 + 3 * 128, 15 * 4 + 3 * 128, \
19 * 4 + 3 * 128, 23 * 4 + 3 * 128, 27 * 4 + 3 * 128, 31 * 4 + 3 * 128, \
};
// tables for word 1
uint32_t matrix_table_one[32] = {
0 * 4 + 1 * 128, 1 * 4 + 1 * 128, 2 * 4 + 1 * 128, 3 * 4 + 1 * 128, \
4 * 4 + 1 * 128, 5 * 4 + 1 * 128, 6 * 4 + 1 * 128, 7 * 4 + 1 * 128, \
8 * 4 + 2 * 128, 9 * 4 + 2 * 128, 10 * 4 + 2 * 128, 11 * 4 + 2 * 128, \
12 * 4 + 2 * 128, 13 * 4 + 2 * 128, 14 * 4 + 2 * 128, 15 * 4 + 2 * 128, \
16 * 4 + 3 * 128, 17 * 4 + 3 * 128, 18 * 4 + 3 * 128, 19 * 4 + 3 * 128, \
20 * 4 + 3 * 128, 21 * 4 + 3 * 128, 22 * 4 + 3 * 128, 23 * 4 + 3 * 128, \
24 * 4 + 0 * 128, 25 * 4 + 0 * 128, 26 * 4 + 0 * 128, 27 * 4 + 0 * 128, \
28 * 4 + 0 * 128, 29 * 4 + 0 * 128, 30 * 4 + 0 * 128, 31 * 4 + 0 * 128, \
};
uint32_t linear_table_one[32] = {
1 * 4 + 0 * 128, 5 * 4 + 0 * 128, 9 * 4 + 0 * 128, 13 * 4 + 0 * 128, \
17 * 4 + 0 * 128, 21 * 4 + 0 * 128, 25 * 4 + 0 * 128, 29 * 4 + 0 * 128,\
2 * 4 + 1 * 128, 6 * 4 + 1 * 128, 10 * 4 + 1 * 128, 14 * 4 + 1 * 128, \
18 * 4 + 1 * 128, 22 * 4 + 1 * 128, 26 * 4 + 1 * 128, 30 * 4 + 1 * 128, \
3 * 4 + 2 * 128, 7 * 4 + 2 * 128, 11 * 4 + 2 * 128, 15 * 4 + 2 * 128, \
19 * 4 + 2 * 128, 23 * 4 + 2 * 128, 27 * 4 + 2 * 128, 31 * 4 + 2 * 128, \
0 * 4 + 3 * 128, 4 * 4 + 3 * 128, 8 * 4 + 3 * 128, 12 * 4 + 3 * 128, \
16 * 4 + 3 * 128, 20 * 4 + 3 * 128, 24 * 4 + 3 * 128, 28 * 4 + 3 * 128, \
};
// tables for word 2
uint32_t matrix_table_two[32] = {
0 * 4 + 2 * 128, 1 * 4 + 2 * 128, 2 * 4 + 2 * 128, 3 * 4 + 2 * 128, \
4 * 4 + 2 * 128, 5 * 4 + 2 * 128, 6 * 4 + 2 * 128, 7 * 4 + 2 * 128, \
8 * 4 + 3 * 128, 9 * 4 + 3 * 128, 10 * 4 + 3 * 128, 11 * 4 + 3 * 128, \
12 * 4 + 3 * 128, 13 * 4 + 3 * 128, 14 * 4 + 3 * 128, 15 * 4 + 3 * 128, \
16 * 4 + 0 * 128, 17 * 4 + 0 * 128, 18 * 4 + 0 * 128, 19 * 4 + 0 * 128, \
20 * 4 + 0 * 128, 21 * 4 + 0 * 128, 22 * 4 + 0 * 128, 23 * 4 + 0 * 128, \
24 * 4 + 1 * 128, 25 * 4 + 1 * 128, 26 * 4 + 1 * 128, 27 * 4 + 1 * 128, \
28 * 4 + 1 * 128, 29 * 4 + 1 * 128, 30 * 4 + 1 * 128, 31 * 4 + 1 * 128, \
};
uint32_t linear_table_two[32] = {
2 * 4 + 0 * 128, 6 * 4 + 0 * 128, 10 * 4 + 0 * 128, 14 * 4 + 0 * 128, \
18 * 4 + 0 * 128, 22 * 4 + 0 * 128, 26 * 4 + 0 * 128, 30 * 4 + 0 * 128,\
3 * 4 + 1 * 128, 7 * 4 + 1 * 128, 11 * 4 + 1 * 128, 15 * 4 + 1 * 128, \
19 * 4 + 1 * 128, 23 * 4 + 1 * 128, 27 * 4 + 1 * 128, 31 * 4 + 1 * 128, \
0 * 4 + 2 * 128, 4 * 4 + 2 * 128, 8 * 4 + 2 * 128, 12 * 4 + 2 * 128, \
16 * 4 + 2 * 128, 20 * 4 + 2 * 128, 24 * 4 + 2 * 128, 28 * 4 + 2 * 128, \
1 * 4 + 3 * 128, 5 * 4 + 3 * 128, 9 * 4 + 3 * 128, 13 * 4 + 3 * 128, \
17 * 4 + 3 * 128, 21 * 4 + 3 * 128, 25 * 4 + 3 * 128, 29 * 4 + 3 * 128, \
};
// tables for word 3
uint32_t matrix_table_three[32] = {
0 * 4 + 3 * 128, 1 * 4 + 3 * 128, 2 * 4 + 3 * 128, 3 * 4 + 3 * 128, \
4 * 4 + 3 * 128, 5 * 4 + 3 * 128, 6 * 4 + 3 * 128, 7 * 4 + 3 * 128, \
8 * 4 + 0 * 128, 9 * 4 + 0 * 128, 10 * 4 + 0 * 128, 11 * 4 + 0 * 128, \
12 * 4 + 0 * 128, 13 * 4 + 0 * 128, 14 * 4 + 0 * 128, 15 * 4 + 0 * 128, \
16 * 4 + 1 * 128, 17 * 4 + 1 * 128, 18 * 4 + 1 * 128, 19 * 4 + 1 * 128, \
20 * 4 + 1 * 128, 21 * 4 + 1 * 128, 22 * 4 + 1 * 128, 23 * 4 + 1 * 128, \
24 * 4 + 2 * 128, 25 * 4 + 2 * 128, 26 * 4 + 2 * 128, 27 * 4 + 2 * 128, \
28 * 4 + 2 * 128, 29 * 4 + 2 * 128, 30 * 4 + 2 * 128, 31 * 4 + 2 * 128, \
};
uint32_t linear_table_three[32] = {
3 * 4 + 0 * 128, 7 * 4 + 0 * 128, 11 * 4 + 0 * 128, 15 * 4 + 0 * 128, \
19 * 4 + 0 * 128, 23 * 4 + 0 * 128, 27 * 4 + 0 * 128, 31 * 4 + 0 * 128,\
0 * 4 + 1 * 128, 4 * 4 + 1 * 128, 8 * 4 + 1 * 128, 12 * 4 + 1 * 128, \
16 * 4 + 1 * 128, 20 * 4 + 1 * 128, 24 * 4 + 1 * 128, 28 * 4 + 1 * 128, \
1 * 4 + 2 * 128, 5 * 4 + 2 * 128, 9 * 4 + 2 * 128, 13 * 4 + 2 * 128, \
17 * 4 + 2 * 128, 21 * 4 + 2 * 128, 25 * 4 + 2 * 128, 29 * 4 + 2 * 128, \
2 * 4 + 3 * 128, 6 * 4 + 3 * 128, 10 * 4 + 3 * 128, 14 * 4 + 3 * 128, \
18 * 4 + 3 * 128, 22 * 4 + 3 * 128, 26 * 4 + 3 * 128, 30 * 4 + 3 * 128, \
};
// constant memory: IV, round keys SK, E_K(Y0) and len(A)||len(C)
__constant__ uint8_t constant_iv[12];
__constant__ uint32_t constant_sk[32];
__constant__ uint8_t constant_ency0[16];
__constant__ uint8_t constant_lenAC[16];
void otherT(uint8_t T[16][256][16])
{
int i = 0, j = 0, k = 0;
uint64_t vh, vl;
uint64_t zh, zl;
for (i = 0; i < 256; i++)
{
vh = ((uint64_t)T[0][i][0] << 56) ^ ((uint64_t)T[0][i][1] << 48) ^ \
((uint64_t)T[0][i][2] << 40) ^ ((uint64_t)T[0][i][3] << 32) ^ \
((uint64_t)T[0][i][4] << 24) ^ ((uint64_t)T[0][i][5] << 16) ^ \
((uint64_t)T[0][i][6] << 8) ^ ((uint64_t)T[0][i][7]);
vl = ((uint64_t)T[0][i][8] << 56) ^ ((uint64_t)T[0][i][9] << 48) ^ \
((uint64_t)T[0][i][10] << 40) ^ ((uint64_t)T[0][i][11] << 32) ^ \
((uint64_t)T[0][i][12] << 24) ^ ((uint64_t)T[0][i][13] << 16) ^ \
((uint64_t)T[0][i][14] << 8) ^ ((uint64_t)T[0][i][15]);
zh = zl = 0;
for (j = 0; j <= 120; j++)
{
if ((j > 0) && (0 == j % 8))
{
zh ^= vh;
zl ^= vl;
for (k = 1; k <= 16 / 2; k++)
{
T[j / 8][i][16 / 2 - k] = (uint8_t)zh;
zh = zh >> 8;
T[j / 8][i][16 - k] = (uint8_t)zl;
zl = zl >> 8;
}
zh = zl = 0;
}
if (vl & 0x1)
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
vh ^= 0xe100000000000000;
}
else
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
}
}
}
}
// build the GF(2^128) multiplication lookup tables for the hash subkey H
void computeTable(uint8_t T[16][256][16], uint8_t H[16])
{
// zh is the higher 64-bit, zl is the lower 64-bit
uint64_t zh = 0, zl = 0;
// vh is the higher 64-bit, vl is the lower 64-bit
uint64_t vh = ((uint64_t)H[0] << 56) ^ ((uint64_t)H[1] << 48) ^ \
((uint64_t)H[2] << 40) ^ ((uint64_t)H[3] << 32) ^ \
((uint64_t)H[4] << 24) ^ ((uint64_t)H[5] << 16) ^ \
((uint64_t)H[6] << 8) ^ ((uint64_t)H[7]);
uint64_t vl = ((uint64_t)H[8] << 56) ^ ((uint64_t)H[9] << 48) ^ \
((uint64_t)H[10] << 40) ^ ((uint64_t)H[11] << 32) ^ \
((uint64_t)H[12] << 24) ^ ((uint64_t)H[13] << 16) ^ \
((uint64_t)H[14] << 8) ^ ((uint64_t)H[15]);
uint8_t temph;
uint64_t tempvh = vh;
uint64_t tempvl = vl;
int i = 0, j = 0;
for (i = 0; i < 256; i++)
{
temph = (uint8_t)i;
vh = tempvh;
vl = tempvl;
zh = zl = 0;
for (j = 0; j < 8; j++)
{
if (0x80 & temph)
{
zh ^= vh;
zl ^= vl;
}
if (vl & 0x1)
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
vh ^= 0xe100000000000000;
}
else
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
}
temph = temph << 1;
}
// get result
for (j = 1; j <= 16 / 2; j++)
{
T[0][i][16 / 2 - j] = (uint8_t)zh;
zh = zh >> 8;
T[0][i][16 - j] = (uint8_t)zl;
zl = zl >> 8;
}
}
otherT(T);
}
/**
 * return the value of (output * H) in GF(2^128) by looking up the precomputed tables
*/
void multi(uint8_t T[16][256][16], uint8_t *output)
{
uint8_t i, j;
uint8_t temp[16];
for (i = 0; i < 16; i++)
{
temp[i] = output[i];
output[i] = 0;
}
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
output[j] ^= T[i][*(temp + i)][j];
}
}
}
/*
* a: additional authenticated data
* c: the cipher text or initial vector
*/
void ghash(uint8_t T[16][256][16], uint8_t *add, size_t add_len, uint8_t *cipher, size_t length, uint8_t *output)
{
/* x0 = 0 */
*(uint64_t *)output = 0;
*((uint64_t *)output + 1) = 0;
/* compute with add */
int i = 0;
for (i = 0; i < add_len / 16; i++)
{
*(uint64_t *)output ^= *(uint64_t *)add;
*((uint64_t *)output + 1) ^= *((uint64_t *)add + 1);
add += 16;
multi(T, output);
}
if (add_len % 16)
{
// the remaining add
for (i = 0; i < add_len % 16; i++)
{
*(output + i) ^= *(add + i);
}
multi(T, output);
}
/* compute with cipher text */
for (i = 0; i < length / 16; i++)
{
*(uint64_t *)output ^= *(uint64_t *)cipher;
*((uint64_t *)output + 1) ^= *((uint64_t *)cipher + 1);
cipher += 16;
multi(T, output);
}
if (length % 16)
{
// the remaining cipher
for (i = 0; i < length % 16; i++)
{
*(output + i) ^= *(cipher + i);
}
multi(T, output);
}
/* eor (len(A)||len(C)) */
uint64_t temp_len = (uint64_t)(add_len * 8); // len(A) = (uint64_t)(add_len*8)
for (i = 1; i <= 16 / 2; i++)
{
output[16 / 2 - i] ^= (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
temp_len = (uint64_t)(length * 8); // len(C) = (uint64_t)(length*8)
for (i = 1; i <= 16 / 2; i++)
{
output[16 - i] ^= (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
multi(T, output);
}
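/*
Illustrative host-side GHASH usage (not part of the original file; H, add and cipher
below are placeholder buffers).
*/
#if 0
void ghash_example(void)
{
static uint8_t T[16][256][16]; // 64 KB of tables, kept off the stack
uint8_t H[16] = { 0 }; // hash subkey
uint8_t add[16] = { 0 }; // additional authenticated data
uint8_t cipher[32] = { 0 }; // ciphertext
uint8_t tag_in[16];
computeTable(T, H); // precompute the 8-bit multiplication tables for H
ghash(T, add, sizeof(add), cipher, sizeof(cipher), tag_in); // GHASH(A, C)
}
#endif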
/*
** Convert data from the linear block layout to the per-warp matrix layout
** dev_linear: input buffer in linear layout
** dev_matrix: output buffer in matrix layout
*/
__global__ void kernal_linear_to_matrix(\
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_linear[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_matrix[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//
{
uint32_t *read = (uint32_t *)(dev_linear + dev_offset);
uint32_t *write = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//
__syncthreads();
//
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//0
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//1
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//2
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//3
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//
__syncthreads();
//
{
uint32_t *write = (uint32_t *)(dev_matrix + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** Convert data from the per-warp matrix layout back to the linear block layout
** dev_matrix: input buffer in matrix layout
** dev_linear: output buffer in linear layout
*/
__global__ void kernal_matrix_to_linear(\
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_matrix[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_linear[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//
{
uint32_t *read = (uint32_t *)(dev_matrix + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//
__syncthreads();
//
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//0
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//1
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//2
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//3
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//
__syncthreads();
//
{
uint32_t *write = (uint32_t *)(dev_linear + dev_offset);
uint32_t *read = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** SM4-CTR: generate one keystream block per thread and XOR it with the input
** dev_SboxTable: S-box table in global memory
** counter: initial counter value for this batch
** streamid: index of the stream processing this chunk
** dev_input: input chunk
** dev_output: output chunk
*/
__global__ void kernal_enc(uint8_t *const __restrict__ dev_SboxTable, \
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint32_t counter, uint32_t streamid, \
uint8_t dev_input[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_output[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint8_t *rw_matrix = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
{
uint32_t ulbuf[5];
{
// build this thread's counter block: 96-bit IV || 32-bit per-thread counter
uint8_t tidCTR[16];
*(uint32_t *)(tidCTR + 0) = *(uint32_t *)(constant_iv + 0);
*(uint32_t *)(tidCTR + 4) = *(uint32_t *)(constant_iv + 4);
*(uint32_t *)(tidCTR + 8) = *(uint32_t *)(constant_iv + 8);
*(uint32_t *)(tidCTR + 12) = counter + (uint32_t)(threadIdx.x + blockIdx.x * blockDim.x + streamid * (PARTICLE_SIZE / STREAM_SIZE / 16));
//*(uint32_t *)(tidCTR + 12) = counter;
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
ulbuf[i] = (((uint32_t)tidCTR[i * 4]) << 24) | \
(((uint32_t)tidCTR[i * 4 + 1]) << 16) | \
(((uint32_t)tidCTR[i * 4 + 2]) << 8) | \
(uint32_t)tidCTR[i * 4 + 3];
}
}
// 32 SM4 rounds
{
uint32_t temp;
uint8_t a[4];
uint32_t bb;
#pragma unroll 32
for (int i = 0; i < 32; i++)
{
temp = ulbuf[(i + 1) % 5] ^ ulbuf[(i + 2) % 5] ^ ulbuf[(i + 3) % 5] ^ constant_sk[i];
a[0] = (uint8_t)(temp >> 24);
a[1] = (uint8_t)(temp >> 16);
a[2] = (uint8_t)(temp >> 8);
a[3] = (uint8_t)temp;
a[0] = dev_SboxTable[a[0]];
a[1] = dev_SboxTable[a[1]];
a[2] = dev_SboxTable[a[2]];
a[3] = dev_SboxTable[a[3]];
bb = (((uint32_t)a[0]) << 24) | (((uint32_t)a[1]) << 16) | (((uint32_t)a[2]) << 8) | (uint32_t)a[3];
bb = bb ^ ((bb << 2) | (bb >> 30)) ^ ((bb << 10) | (bb >> 22)) ^ ((bb << 18) | (bb >> 14)) ^ ((bb << 24) | (bb >> 8));
ulbuf[(i + 4) % 5] = ulbuf[(i + 0) % 5] ^ bb;
}
}
{
// write the keystream words (in swapped output order) into the shared matrix
uint8_t temp[4];
uint8_t *write = rw_matrix;
temp[0] = (uint8_t)(ulbuf[0] >> 24);
temp[1] = (uint8_t)(ulbuf[0] >> 16);
temp[2] = (uint8_t)(ulbuf[0] >> 8);
temp[3] = (uint8_t)ulbuf[0];
*(uint32_t *)(rw_matrix + 0 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[4] >> 24);
temp[1] = (uint8_t)(ulbuf[4] >> 16);
temp[2] = (uint8_t)(ulbuf[4] >> 8);
temp[3] = (uint8_t)ulbuf[4];
*(uint32_t *)(rw_matrix + 1 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[3] >> 24);
temp[1] = (uint8_t)(ulbuf[3] >> 16);
temp[2] = (uint8_t)(ulbuf[3] >> 8);
temp[3] = (uint8_t)ulbuf[3];
*(uint32_t *)(rw_matrix + 2 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[2] >> 24);
temp[1] = (uint8_t)(ulbuf[2] >> 16);
temp[2] = (uint8_t)(ulbuf[2] >> 8);
temp[3] = (uint8_t)ulbuf[2];
*(uint32_t *)(rw_matrix + 3 * 128) = *(uint32_t *)temp;
}
}
//
__syncthreads();
//
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//0
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//1
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//2
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//3
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//
__syncthreads();
//
{
uint32_t *read = (uint32_t *)(dev_input + dev_offset);
uint32_t *write = (uint32_t *)(dev_output + dev_offset);
uint32_t *cipher = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = (*(read + i * BLOCK_SIZE)) ^ (*(cipher + i * BLOCK_SIZE));
}
}
}
/*
** Multiply-accumulate one block into the GHASH state in GF(2^128)
** dev_gfmult_table: precomputed multiplication tables for the hash subkey
** dev_cipher: ciphertext blocks (matrix layout)
** dev_gfmult: running GHASH state (matrix layout), updated in place
*/
__global__ void kernal_gfmult(\
uint8_t dev_gfmult_table[16][256][16], \
uint8_t dev_cipher[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_gfmult[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE];
uint8_t *matrix = smem;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//
//
{
uint32_t *read_cipher = (uint32_t *)(dev_cipher + dev_offset);
uint32_t *read_gfmult = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = (*(read_cipher + i * BLOCK_SIZE)) ^ (*(read_gfmult + i * BLOCK_SIZE));
}
}
//
__syncthreads();
//
{
uint8_t *tid_cipher = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint8_t temp;
uint8_t *read;
// per-thread GF(2^128) accumulator
uint8_t tid_gfmult[16];
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) = 0;
}
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
read = tid_cipher + i * (32 * 4);
#pragma unroll 4
for (int j = 0; j < 4; j++)
{
temp = read[j];
#pragma unroll 16
for (int k = 0; k < 16; k++)
{
tid_gfmult[k] ^= dev_gfmult_table[i * 4 + j][temp][k];
}
}
}
//
{
uint32_t *write = (uint32_t *)(matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * 32) = *(uint32_t *)(tid_gfmult + i * 4);
}
}
}
//
__syncthreads();
//
{
uint32_t *write = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** Finish GHASH: fold in len(A)||len(C) and XOR with E_K(Y0) to form the tag
** dev_gfmult_table: precomputed multiplication tables for the hash subkey
** dev_gfmult: GHASH state on input, authentication tag on output
*/
__global__ void kernal_final(\
uint8_t dev_gfmult_table[16][256][16], \
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_gfmult[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE];
uint8_t *matrix = smem;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
// load the running GHASH state into shared memory
{
uint32_t *read = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//
__syncthreads();
{
uint8_t *tid_cipher = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint8_t temp;
uint8_t *read;
// per-thread GF(2^128) accumulator
uint8_t tid_gfmult[16];
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) = 0;
}
//
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
read = tid_cipher + i * (32 * 4);
#pragma unroll 4
for (int j = 0; j < 4; j++)
{
temp = read[j] ^ constant_lenAC[i * 4 + j];
#pragma unroll 16
for (int k = 0; k < 16; k++)
{
tid_gfmult[k] ^= dev_gfmult_table[i * 4 + j][temp][k];
}
}
}
// XOR with E_K(Y0) to produce the authentication tag
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) ^= *(uint32_t *)(constant_ency0 + i * 4);
}
// store the per-thread tag back into shared memory
{
uint32_t *write = (uint32_t *)(matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * 32) = *(uint32_t *)(tid_gfmult + i * 4);
}
}
}
//
__syncthreads();
//
{
uint32_t *write = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
void Init_device_memory(device_memory *way, uint8_t add[16], uint8_t iv[12])
{
//
for (int i = 0; i < STREAM_SIZE; i++)
{
hipStreamCreate(&(way->stream[i]));
}
//
hipHostMalloc((void**)&(way->dev_matrix_table_zero), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_matrix_table_zero, matrix_table_zero, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_linear_table_zero), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_linear_table_zero, linear_table_zero, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_matrix_table_one), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_matrix_table_one, matrix_table_one, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_linear_table_one), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_linear_table_one, linear_table_one, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_matrix_table_two), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_matrix_table_two, matrix_table_two, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_linear_table_two), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_linear_table_two, linear_table_two, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_matrix_table_three), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_matrix_table_three, matrix_table_three, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
hipHostMalloc((void**)&(way->dev_linear_table_three), 32 * sizeof(uint32_t), hipHostMallocDefault);
hipMemcpy(way->dev_linear_table_three, linear_table_three, 32 * sizeof(uint32_t), hipMemcpyHostToDevice);
//
hipMemcpyToSymbol(constant_sk, way->ctx.sk, 32 * sizeof(uint32_t));
//IV
hipMemcpyToSymbol(constant_iv, iv, 12);
//S
hipHostMalloc((void**)&(way->dev_SboxTable), 256, hipHostMallocDefault);
hipMemcpy(way->dev_SboxTable, SboxTable, 256, hipMemcpyHostToDevice);
//
hipHostMalloc((void**)&(way->dev_input), PARTICLE_SIZE, hipHostMallocDefault);
//
hipHostMalloc((void**)&(way->dev_output), PARTICLE_SIZE, hipHostMallocDefault);
//0
uint8_t y0[16];
uint8_t ency0[16];
memset(y0, 0, 16);
// ency0 = E_K(0^128): used here as the GHASH subkey and XORed into the tag
sm4_crypt_ecb(&way->ctx, 16, y0, ency0);
hipMemcpyToSymbol(constant_ency0, ency0, 16);
uint8_t gfmult_table[16][256][16];
//
computeTable(gfmult_table, ency0);
//
hipHostMalloc((void**)&(way->dev_gfmult_table), \
sizeof(gfmult_table), hipHostMallocDefault);
hipMemcpy(way->dev_gfmult_table, gfmult_table, \
sizeof(gfmult_table), hipMemcpyHostToDevice);
//
uint8_t temp[16];
memset(temp, 0, 16);
for (int i = 0; i < 16; i++)
{
temp[i] ^= add[i];
}
multi(gfmult_table, temp);
uint8_t *gfmult_init = (uint8_t *)malloc(PARTICLE_SIZE);
for (int i = 0; i < PARTICLE_SIZE / 16; i++)
{
memcpy(gfmult_init + i * 16, temp, 16);
}
//
hipHostMalloc((void**)&(way->dev_gfmult), \
PARTICLE_SIZE, hipHostMallocDefault);
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//
hipMemcpyAsync(\
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
gfmult_init + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyHostToDevice, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
// convert the initial GHASH state to the matrix layout
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//
hipStreamSynchronize(way->stream[i]);
}
}
free(gfmult_init);
}
/*
** Release device/host resources and destroy the streams
*/
void Free_device_memory(device_memory *way)
{
for (int i = 0; i < STREAM_SIZE; i++)
{
//
hipStreamSynchronize(way->stream[i]);
}
//
hipHostFree(way->dev_gfmult_table);
hipHostFree(way->dev_IV);
hipHostFree(way->dev_SboxTable);
hipHostFree(way->dev_matrix_table_zero);
hipHostFree(way->dev_linear_table_zero);
hipHostFree(way->dev_matrix_table_one);
hipHostFree(way->dev_linear_table_one);
hipHostFree(way->dev_matrix_table_two);
hipHostFree(way->dev_linear_table_two);
hipHostFree(way->dev_matrix_table_three);
hipHostFree(way->dev_linear_table_three);
hipHostFree(way->dev_input);
hipHostFree(way->dev_output);
hipHostFree(way->dev_gfmult);
//destroy the streams
for (int i = 0; i < STREAM_SIZE; i++)
{
hipStreamDestroy(way->stream[i]);
}
}
/*
** Authenticated-encryption host interface function
** counter: encryption block counter
** input: plaintext input
** output: ciphertext output
*/
void sm4_gcm_enc(device_memory *way, uint32_t counter, uint8_t input[PARTICLE_SIZE], uint8_t output[PARTICLE_SIZE])
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the plaintext from host memory to device global memory
hipMemcpyAsync(\
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
input + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyHostToDevice, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//encrypt the plaintext blocks
kernal_enc << < grid, block, 0, way->stream[i] >> > (way->dev_SboxTable, \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
counter, i, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the encrypted ciphertext blocks from device global memory back to host memory
hipMemcpyAsync(output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyDeviceToHost, way->stream[i]);
}
/*
for (int i = 0; i < STREAM_SIZE; i++)
{
//convert the ciphertext blocks from linear to tiled layout
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//perform the GF multiply-and-add
kernal_gfmult << < grid, block, 0, way->stream[i] >> > (\
(uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
*/
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
hipStreamSynchronize(way->stream[i]);
}
}
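/*
** Minimal host-side usage sketch. The key/add/iv/plain/cipher/tag buffers, the starting
** counter value and the per-call counter stride are illustrative assumptions, not part of
** the interfaces above:
**
**   device_memory way;
**   sm4_setkey_enc(&way.ctx, key);                // expand the 128-bit key first
**   Init_device_memory(&way, add, iv);            // tables, streams, constant memory
**   uint32_t counter = 1;
**   for (int b = 0; b < nbuf; b++)
**   {
**       sm4_gcm_enc(&way, counter, plain + b * PARTICLE_SIZE, cipher + b * PARTICLE_SIZE);
**       counter += PARTICLE_SIZE / 16;            // each call consumes PARTICLE_SIZE/16 counter values
**   }
**   sm4_gcm_final(&way, nbuf, tag);               // length: 16-byte blocks accumulated per thread
**   Free_device_memory(&way);
*/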
/*
** Authenticated-decryption host interface function
** counter: encryption block counter
** input: ciphertext input
** output: plaintext output
*/
void sm4_gcm_dec(device_memory *way, uint32_t counter, uint8_t input[PARTICLE_SIZE], uint8_t output[PARTICLE_SIZE])
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the ciphertext from host memory to device global memory
hipMemcpyAsync(\
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
input + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyHostToDevice, way->stream[i]);
}
//convert the ciphertext blocks from linear to tiled layout
for (int i = 0; i < STREAM_SIZE; i++)
{
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//perform the GF multiply-and-add
kernal_gfmult << < grid, block, 0, way->stream[i] >> > (\
(uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//decrypt the ciphertext blocks
kernal_enc << < grid, block, 0, way->stream[i] >> > (way->dev_SboxTable, \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
counter, i, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the decrypted plaintext blocks from device global memory back to host memory
hipMemcpyAsync(output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyDeviceToHost, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
hipStreamSynchronize(way->stream[i]);
}
}
/*
** Produces the final authentication tag
** length: number of ciphertext blocks
** tag: value-result parameter; the tag is written here when the function returns
*/
void sm4_gcm_final(device_memory *way, uint64_t length, uint8_t tag[PARTICLE_SIZE])
{
uint8_t temp[16];
/* eor (len(A)||len(C)) */
uint64_t temp_len = (uint64_t)(16 * 8); // len(A) = (uint64_t)(add_len*8)
for (int i = 1; i <= 16 / 2; i++)
{
temp[16 / 2 - i] = (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
length = length * 16;
temp_len = (uint64_t)(length * 8); // len(C) = (uint64_t)(length*8)
for (int i = 1; i <= 16 / 2; i++)
{
temp[16 - i] = (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
//initialize (len(A)||len(C))
hipMemcpyToSymbol(constant_lenAC, temp, 16);
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//compute the final GHASH result
kernal_final << < grid, block, 0, way->stream[i] >> > ((uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//convert the GHASH results in global memory from tiled to linear layout
kernal_matrix_to_linear << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy each thread's tag from global memory back to host memory
hipMemcpyAsync(tag + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
hipMemcpyDeviceToHost, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
hipStreamSynchronize(way->stream[i]);
}
} | 838916850b8c63f757b259bbb285fd078bc97d38.cu | #include <string.h>
#include <stdio.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "sm4cuda.cuh"
//S-box table
uint8_t SboxTable[256] = { \
0xd6,0x90,0xe9,0xfe,0xcc,0xe1,0x3d,0xb7,0x16,0xb6,0x14,0xc2,0x28,0xfb,0x2c,0x05, \
0x2b,0x67,0x9a,0x76,0x2a,0xbe,0x04,0xc3,0xaa,0x44,0x13,0x26,0x49,0x86,0x06,0x99, \
0x9c,0x42,0x50,0xf4,0x91,0xef,0x98,0x7a,0x33,0x54,0x0b,0x43,0xed,0xcf,0xac,0x62, \
0xe4,0xb3,0x1c,0xa9,0xc9,0x08,0xe8,0x95,0x80,0xdf,0x94,0xfa,0x75,0x8f,0x3f,0xa6, \
0x47,0x07,0xa7,0xfc,0xf3,0x73,0x17,0xba,0x83,0x59,0x3c,0x19,0xe6,0x85,0x4f,0xa8, \
0x68,0x6b,0x81,0xb2,0x71,0x64,0xda,0x8b,0xf8,0xeb,0x0f,0x4b,0x70,0x56,0x9d,0x35, \
0x1e,0x24,0x0e,0x5e,0x63,0x58,0xd1,0xa2,0x25,0x22,0x7c,0x3b,0x01,0x21,0x78,0x87, \
0xd4,0x00,0x46,0x57,0x9f,0xd3,0x27,0x52,0x4c,0x36,0x02,0xe7,0xa0,0xc4,0xc8,0x9e, \
0xea,0xbf,0x8a,0xd2,0x40,0xc7,0x38,0xb5,0xa3,0xf7,0xf2,0xce,0xf9,0x61,0x15,0xa1, \
0xe0,0xae,0x5d,0xa4,0x9b,0x34,0x1a,0x55,0xad,0x93,0x32,0x30,0xf5,0x8c,0xb1,0xe3, \
0x1d,0xf6,0xe2,0x2e,0x82,0x66,0xca,0x60,0xc0,0x29,0x23,0xab,0x0d,0x53,0x4e,0x6f, \
0xd5,0xdb,0x37,0x45,0xde,0xfd,0x8e,0x2f,0x03,0xff,0x6a,0x72,0x6d,0x6c,0x5b,0x51, \
0x8d,0x1b,0xaf,0x92,0xbb,0xdd,0xbc,0x7f,0x11,0xd9,0x5c,0x41,0x1f,0x10,0x5a,0xd8, \
0x0a,0xc1,0x31,0x88,0xa5,0xcd,0x7b,0xbd,0x2d,0x74,0xd0,0x12,0xb8,0xe5,0xb4,0xb0, \
0x89,0x69,0x97,0x4a,0x0c,0x96,0x77,0x7e,0x65,0xb9,0xf1,0x09,0xc5,0x6e,0xc6,0x84, \
0x18,0xf0,0x7d,0xec,0x3a,0xdc,0x4d,0x20,0x79,0xee,0x5f,0x3e,0xd7,0xcb,0x39,0x48, \
};
/* System parameter */
uint32_t FK[4] = { 0xa3b1bac6,0x56aa3350,0x677d9197,0xb27022dc };
/* fixed parameter */
uint32_t CK[32] = { \
0x00070e15,0x1c232a31,0x383f464d,0x545b6269, \
0x70777e85,0x8c939aa1,0xa8afb6bd,0xc4cbd2d9, \
0xe0e7eef5,0xfc030a11,0x181f262d,0x343b4249, \
0x50575e65,0x6c737a81,0x888f969d,0xa4abb2b9, \
0xc0c7ced5,0xdce3eaf1,0xf8ff060d,0x141b2229, \
0x30373e45,0x4c535a61,0x686f767d,0x848b9299, \
0xa0a7aeb5,0xbcc3cad1,0xd8dfe6ed,0xf4fb0209, \
0x10171e25,0x2c333a41,0x484f565d,0x646b7279, \
};
/*
Big-endian load ("row shift" helper), C++ version
b: source byte array
i: byte offset into the array
n: output value
*/
inline void GET_UINT_BE(uint32_t *n, uint8_t *b, uint32_t i)
{
(*n) = (((uint32_t)b[i]) << 24) | (((uint32_t)b[i + 1]) << 16) | (((uint32_t)b[i + 2]) << 8) | (uint32_t)b[i + 3];
}
/*
Big-endian store, the inverse of the load above
b: destination byte array
i: byte offset into the array
n: input value
*/
inline void PUT_UINT_BE(uint32_t n, uint8_t *b, uint32_t i)
{
//highest byte of n
b[i + 0] = (uint8_t)(n >> 24);
//second-highest byte of n
b[i + 1] = (uint8_t)(n >> 16);
//second-lowest byte of n
b[i + 2] = (uint8_t)(n >> 8);
//lowest byte of n
b[i + 3] = (uint8_t)n;
}
/*
S-box substitution
*/
inline uint8_t sm4Sbox(uint8_t inch)
{
return SboxTable[inch];
}
/*
Rotate-left: rotate x left by n bits
*/
inline uint32_t ROTL(uint32_t x, uint32_t n)
{
return (x << n) | (x >> (32 - n));
}
/*
Swap the values of a and b
*/
inline void SWAP(uint32_t *a, uint32_t *b)
{
uint32_t c = *a;
*a = *b;
*b = c;
}
uint32_t sm4Lt(uint32_t ka)
{
uint8_t a[4];
PUT_UINT_BE(ka, a, 0);
//S-box table lookup
a[0] = sm4Sbox(a[0]);
a[1] = sm4Sbox(a[1]);
a[2] = sm4Sbox(a[2]);
a[3] = sm4Sbox(a[3]);
//pack the substituted bytes back into bb
uint32_t bb = 0;
GET_UINT_BE(&bb, a, 0);
//XOR bb with its left rotations by 2, 10, 18 and 24 bits and return the result
return bb ^ (ROTL(bb, 2)) ^ (ROTL(bb, 10)) ^ (ROTL(bb, 18)) ^ (ROTL(bb, 24));
}
uint32_t sm4F(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t rk)
{
return (x0^sm4Lt(x1^x2^x3^rk));
}
/*
Key-expansion helper
*/
uint32_t sm4CalciRK(uint32_t ka)
{
uint8_t a[4];
PUT_UINT_BE(ka, a, 0);
a[0] = sm4Sbox(a[0]);
a[1] = sm4Sbox(a[1]);
a[2] = sm4Sbox(a[2]);
a[3] = sm4Sbox(a[3]);
uint32_t bb = 0;
GET_UINT_BE(&bb, a, 0);
return bb ^ (ROTL(bb, 13)) ^ (ROTL(bb, 23));
}
/*
SK: value-result parameter, filled with the expanded round keys
key: initial key (128 bit)
*/
void sm4_setkey(uint32_t SK[32], uint8_t key[16])
{
uint32_t MK[4];
GET_UINT_BE(&MK[0], key, 0);
GET_UINT_BE(&MK[1], key, 4);
GET_UINT_BE(&MK[2], key, 8);
GET_UINT_BE(&MK[3], key, 12);
//initial round-key state
uint32_t k[36];
k[0] = MK[0] ^ FK[0];
k[1] = MK[1] ^ FK[1];
k[2] = MK[2] ^ FK[2];
k[3] = MK[3] ^ FK[3];
for (int i = 0; i < 32; i++)
{
k[i + 4] = k[i] ^ (sm4CalciRK(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]));
SK[i] = k[i + 4];
}
}
/*
SM4 round function
*/
void sm4_one_round(uint32_t sk[32], uint8_t input[16], uint8_t output[16])
{
uint32_t ulbuf[36];
memset(ulbuf, 0, sizeof(ulbuf));
GET_UINT_BE(&ulbuf[0], input, 0);
GET_UINT_BE(&ulbuf[1], input, 4);
GET_UINT_BE(&ulbuf[2], input, 8);
GET_UINT_BE(&ulbuf[3], input, 12);
for (int i = 0; i < 32; i++)
{
ulbuf[i + 4] = sm4F(ulbuf[i], ulbuf[i + 1], ulbuf[i + 2], ulbuf[i + 3], sk[i]);
}
PUT_UINT_BE(ulbuf[35], output, 0);
PUT_UINT_BE(ulbuf[34], output, 4);
PUT_UINT_BE(ulbuf[33], output, 8);
PUT_UINT_BE(ulbuf[32], output, 12);
}
/*
Key expansion for encryption mode
ctx: value-result parameter; on return it holds the encryption key schedule
key: encryption key (128 bit)
*/
void sm4_setkey_enc(sm4_context *ctx, uint8_t key[16])
{
ctx->mode = SM4_ENCRYPT;
sm4_setkey(ctx->sk, key);
}
/*
Key expansion for decryption mode
ctx: value-result parameter; on return it holds the decryption key schedule
key: encryption key (128 bit)
*/
void sm4_setkey_dec(sm4_context *ctx, uint8_t key[16])
{
ctx->mode = SM4_DECRYPT;
sm4_setkey(ctx->sk, key);
for (int i = 0; i < 16; i++)
{
SWAP(&(ctx->sk[i]), &(ctx->sk[31 - i]));
}
}
/*
* SM4-ECB block encryption/decryption
*
 * SM4-ECB mode encryption/decryption
 * ctx: value-result parameter, pointer to the round-key schedule
 * mode: encryption/decryption mode; SM4 itself does not distinguish the two -
 *       ciphertext in gives plaintext out and plaintext in gives ciphertext out
 * input: data input (16 bytes per block)
 * output: data output (16 bytes per block)
*/
void sm4_crypt_ecb(sm4_context *ctx, int length, uint8_t *input, uint8_t *output)
{
while (length > 0)
{
sm4_one_round(ctx->sk, input, output);
input += 16;
output += 16;
length -= 16;
}
}
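/*
** Minimal usage sketch for the host-side SM4 primitives above; the key and plaintext
** values are placeholders:
**
**   sm4_context ctx;
**   uint8_t key[16] = { 0 };            // placeholder 128-bit key
**   uint8_t pt[16] = { 0 };             // placeholder plaintext block
**   uint8_t ct[16];
**   sm4_setkey_enc(&ctx, key);
**   sm4_crypt_ecb(&ctx, 16, pt, ct);    // encrypt one 16-byte block
*/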
/*
To avoid shared-memory bank conflicts the data blocks are converted between storage
layouts; one conversion takes four memory transactions, hence four lookup tables.
The tables used by the four conversion rounds follow.
*/
//conversion lookup table 0
uint32_t matrix_table_zero[32] = {
0 * 4 + 0 * 128, 1 * 4 + 0 * 128, 2 * 4 + 0 * 128, 3 * 4 + 0 * 128, \
4 * 4 + 0 * 128, 5 * 4 + 0 * 128, 6 * 4 + 0 * 128, 7 * 4 + 0 * 128, \
8 * 4 + 1 * 128, 9 * 4 + 1 * 128, 10 * 4 + 1 * 128, 11 * 4 + 1 * 128, \
12 * 4 + 1 * 128, 13 * 4 + 1 * 128, 14 * 4 + 1 * 128, 15 * 4 + 1 * 128, \
16 * 4 + 2 * 128, 17 * 4 + 2 * 128, 18 * 4 + 2 * 128, 19 * 4 + 2 * 128, \
20 * 4 + 2 * 128, 21 * 4 + 2 * 128, 22 * 4 + 2 * 128, 23 * 4 + 2 * 128, \
24 * 4 + 3 * 128, 25 * 4 + 3 * 128, 26 * 4 + 3 * 128, 27 * 4 + 3 * 128, \
28 * 4 + 3 * 128, 29 * 4 + 3 * 128, 30 * 4 + 3 * 128, 31 * 4 + 3 * 128, \
};
uint32_t linear_table_zero[32] = {
0 * 4 + 0 * 128, 4 * 4 + 0 * 128, 8 * 4 + 0 * 128, 12 * 4 + 0 * 128, \
16 * 4 + 0 * 128, 20 * 4 + 0 * 128, 24 * 4 + 0 * 128, 28 * 4 + 0 * 128,\
1 * 4 + 1 * 128, 5 * 4 + 1 * 128, 9 * 4 + 1 * 128, 13 * 4 + 1 * 128, \
17 * 4 + 1 * 128, 21 * 4 + 1 * 128, 25 * 4 + 1 * 128, 29 * 4 + 1 * 128, \
2 * 4 + 2 * 128, 6 * 4 + 2 * 128, 10 * 4 + 2 * 128, 14 * 4 + 2 * 128, \
18 * 4 + 2 * 128, 22 * 4 + 2 * 128, 26 * 4 + 2 * 128, 30 * 4 + 2 * 128, \
3 * 4 + 3 * 128, 7 * 4 + 3 * 128, 11 * 4 + 3 * 128, 15 * 4 + 3 * 128, \
19 * 4 + 3 * 128, 23 * 4 + 3 * 128, 27 * 4 + 3 * 128, 31 * 4 + 3 * 128, \
};
//conversion lookup table 1
uint32_t matrix_table_one[32] = {
0 * 4 + 1 * 128, 1 * 4 + 1 * 128, 2 * 4 + 1 * 128, 3 * 4 + 1 * 128, \
4 * 4 + 1 * 128, 5 * 4 + 1 * 128, 6 * 4 + 1 * 128, 7 * 4 + 1 * 128, \
8 * 4 + 2 * 128, 9 * 4 + 2 * 128, 10 * 4 + 2 * 128, 11 * 4 + 2 * 128, \
12 * 4 + 2 * 128, 13 * 4 + 2 * 128, 14 * 4 + 2 * 128, 15 * 4 + 2 * 128, \
16 * 4 + 3 * 128, 17 * 4 + 3 * 128, 18 * 4 + 3 * 128, 19 * 4 + 3 * 128, \
20 * 4 + 3 * 128, 21 * 4 + 3 * 128, 22 * 4 + 3 * 128, 23 * 4 + 3 * 128, \
24 * 4 + 0 * 128, 25 * 4 + 0 * 128, 26 * 4 + 0 * 128, 27 * 4 + 0 * 128, \
28 * 4 + 0 * 128, 29 * 4 + 0 * 128, 30 * 4 + 0 * 128, 31 * 4 + 0 * 128, \
};
uint32_t linear_table_one[32] = {
1 * 4 + 0 * 128, 5 * 4 + 0 * 128, 9 * 4 + 0 * 128, 13 * 4 + 0 * 128, \
17 * 4 + 0 * 128, 21 * 4 + 0 * 128, 25 * 4 + 0 * 128, 29 * 4 + 0 * 128,\
2 * 4 + 1 * 128, 6 * 4 + 1 * 128, 10 * 4 + 1 * 128, 14 * 4 + 1 * 128, \
18 * 4 + 1 * 128, 22 * 4 + 1 * 128, 26 * 4 + 1 * 128, 30 * 4 + 1 * 128, \
3 * 4 + 2 * 128, 7 * 4 + 2 * 128, 11 * 4 + 2 * 128, 15 * 4 + 2 * 128, \
19 * 4 + 2 * 128, 23 * 4 + 2 * 128, 27 * 4 + 2 * 128, 31 * 4 + 2 * 128, \
0 * 4 + 3 * 128, 4 * 4 + 3 * 128, 8 * 4 + 3 * 128, 12 * 4 + 3 * 128, \
16 * 4 + 3 * 128, 20 * 4 + 3 * 128, 24 * 4 + 3 * 128, 28 * 4 + 3 * 128, \
};
//conversion lookup table 2
uint32_t matrix_table_two[32] = {
0 * 4 + 2 * 128, 1 * 4 + 2 * 128, 2 * 4 + 2 * 128, 3 * 4 + 2 * 128, \
4 * 4 + 2 * 128, 5 * 4 + 2 * 128, 6 * 4 + 2 * 128, 7 * 4 + 2 * 128, \
8 * 4 + 3 * 128, 9 * 4 + 3 * 128, 10 * 4 + 3 * 128, 11 * 4 + 3 * 128, \
12 * 4 + 3 * 128, 13 * 4 + 3 * 128, 14 * 4 + 3 * 128, 15 * 4 + 3 * 128, \
16 * 4 + 0 * 128, 17 * 4 + 0 * 128, 18 * 4 + 0 * 128, 19 * 4 + 0 * 128, \
20 * 4 + 0 * 128, 21 * 4 + 0 * 128, 22 * 4 + 0 * 128, 23 * 4 + 0 * 128, \
24 * 4 + 1 * 128, 25 * 4 + 1 * 128, 26 * 4 + 1 * 128, 27 * 4 + 1 * 128, \
28 * 4 + 1 * 128, 29 * 4 + 1 * 128, 30 * 4 + 1 * 128, 31 * 4 + 1 * 128, \
};
uint32_t linear_table_two[32] = {
2 * 4 + 0 * 128, 6 * 4 + 0 * 128, 10 * 4 + 0 * 128, 14 * 4 + 0 * 128, \
18 * 4 + 0 * 128, 22 * 4 + 0 * 128, 26 * 4 + 0 * 128, 30 * 4 + 0 * 128,\
3 * 4 + 1 * 128, 7 * 4 + 1 * 128, 11 * 4 + 1 * 128, 15 * 4 + 1 * 128, \
19 * 4 + 1 * 128, 23 * 4 + 1 * 128, 27 * 4 + 1 * 128, 31 * 4 + 1 * 128, \
0 * 4 + 2 * 128, 4 * 4 + 2 * 128, 8 * 4 + 2 * 128, 12 * 4 + 2 * 128, \
16 * 4 + 2 * 128, 20 * 4 + 2 * 128, 24 * 4 + 2 * 128, 28 * 4 + 2 * 128, \
1 * 4 + 3 * 128, 5 * 4 + 3 * 128, 9 * 4 + 3 * 128, 13 * 4 + 3 * 128, \
17 * 4 + 3 * 128, 21 * 4 + 3 * 128, 25 * 4 + 3 * 128, 29 * 4 + 3 * 128, \
};
//conversion lookup table 3
uint32_t matrix_table_three[32] = {
0 * 4 + 3 * 128, 1 * 4 + 3 * 128, 2 * 4 + 3 * 128, 3 * 4 + 3 * 128, \
4 * 4 + 3 * 128, 5 * 4 + 3 * 128, 6 * 4 + 3 * 128, 7 * 4 + 3 * 128, \
8 * 4 + 0 * 128, 9 * 4 + 0 * 128, 10 * 4 + 0 * 128, 11 * 4 + 0 * 128, \
12 * 4 + 0 * 128, 13 * 4 + 0 * 128, 14 * 4 + 0 * 128, 15 * 4 + 0 * 128, \
16 * 4 + 1 * 128, 17 * 4 + 1 * 128, 18 * 4 + 1 * 128, 19 * 4 + 1 * 128, \
20 * 4 + 1 * 128, 21 * 4 + 1 * 128, 22 * 4 + 1 * 128, 23 * 4 + 1 * 128, \
24 * 4 + 2 * 128, 25 * 4 + 2 * 128, 26 * 4 + 2 * 128, 27 * 4 + 2 * 128, \
28 * 4 + 2 * 128, 29 * 4 + 2 * 128, 30 * 4 + 2 * 128, 31 * 4 + 2 * 128, \
};
uint32_t linear_table_three[32] = {
3 * 4 + 0 * 128, 7 * 4 + 0 * 128, 11 * 4 + 0 * 128, 15 * 4 + 0 * 128, \
19 * 4 + 0 * 128, 23 * 4 + 0 * 128, 27 * 4 + 0 * 128, 31 * 4 + 0 * 128,\
0 * 4 + 1 * 128, 4 * 4 + 1 * 128, 8 * 4 + 1 * 128, 12 * 4 + 1 * 128, \
16 * 4 + 1 * 128, 20 * 4 + 1 * 128, 24 * 4 + 1 * 128, 28 * 4 + 1 * 128, \
1 * 4 + 2 * 128, 5 * 4 + 2 * 128, 9 * 4 + 2 * 128, 13 * 4 + 2 * 128, \
17 * 4 + 2 * 128, 21 * 4 + 2 * 128, 25 * 4 + 2 * 128, 29 * 4 + 2 * 128, \
2 * 4 + 3 * 128, 6 * 4 + 3 * 128, 10 * 4 + 3 * 128, 14 * 4 + 3 * 128, \
18 * 4 + 3 * 128, 22 * 4 + 3 * 128, 26 * 4 + 3 * 128, 30 * 4 + 3 * 128, \
};
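/*
** Layout of the table entries above, as used by the conversion kernels below: each value is
** a byte offset inside one warp's 32x16-byte tile, of the form word*4 + row*128, i.e. a
** 4-byte word within a 128-byte row. The four tables rotate the row index between rounds so
** that the four transactions of one conversion touch different rows and the shared-memory
** accesses stay conflict free.
*/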
//IV, SK, ency0 and lenAC are shared by every thread block
__constant__ uint8_t constant_iv[12];
__constant__ uint32_t constant_sk[32];
__constant__ uint8_t constant_ency0[16];
__constant__ uint8_t constant_lenAC[16];
void otherT(uint8_t T[16][256][16])
{
int i = 0, j = 0, k = 0;
uint64_t vh, vl;
uint64_t zh, zl;
for (i = 0; i < 256; i++)
{
vh = ((uint64_t)T[0][i][0] << 56) ^ ((uint64_t)T[0][i][1] << 48) ^ \
((uint64_t)T[0][i][2] << 40) ^ ((uint64_t)T[0][i][3] << 32) ^ \
((uint64_t)T[0][i][4] << 24) ^ ((uint64_t)T[0][i][5] << 16) ^ \
((uint64_t)T[0][i][6] << 8) ^ ((uint64_t)T[0][i][7]);
vl = ((uint64_t)T[0][i][8] << 56) ^ ((uint64_t)T[0][i][9] << 48) ^ \
((uint64_t)T[0][i][10] << 40) ^ ((uint64_t)T[0][i][11] << 32) ^ \
((uint64_t)T[0][i][12] << 24) ^ ((uint64_t)T[0][i][13] << 16) ^ \
((uint64_t)T[0][i][14] << 8) ^ ((uint64_t)T[0][i][15]);
zh = zl = 0;
for (j = 0; j <= 120; j++)
{
if ((j > 0) && (0 == j % 8))
{
zh ^= vh;
zl ^= vl;
for (k = 1; k <= 16 / 2; k++)
{
T[j / 8][i][16 / 2 - k] = (uint8_t)zh;
zh = zh >> 8;
T[j / 8][i][16 - k] = (uint8_t)zl;
zl = zl >> 8;
}
zh = zl = 0;
}
if (vl & 0x1)
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
vh ^= 0xe100000000000000;
}
else
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
}
}
}
}
//build the GF multiplication table
void computeTable(uint8_t T[16][256][16], uint8_t H[16])
{
// zh is the higher 64-bit, zl is the lower 64-bit
uint64_t zh = 0, zl = 0;
// vh is the higher 64-bit, vl is the lower 64-bit
uint64_t vh = ((uint64_t)H[0] << 56) ^ ((uint64_t)H[1] << 48) ^ \
((uint64_t)H[2] << 40) ^ ((uint64_t)H[3] << 32) ^ \
((uint64_t)H[4] << 24) ^ ((uint64_t)H[5] << 16) ^ \
((uint64_t)H[6] << 8) ^ ((uint64_t)H[7]);
uint64_t vl = ((uint64_t)H[8] << 56) ^ ((uint64_t)H[9] << 48) ^ \
((uint64_t)H[10] << 40) ^ ((uint64_t)H[11] << 32) ^ \
((uint64_t)H[12] << 24) ^ ((uint64_t)H[13] << 16) ^ \
((uint64_t)H[14] << 8) ^ ((uint64_t)H[15]);
uint8_t temph;
uint64_t tempvh = vh;
uint64_t tempvl = vl;
int i = 0, j = 0;
for (i = 0; i < 256; i++)
{
temph = (uint8_t)i;
vh = tempvh;
vl = tempvl;
zh = zl = 0;
for (j = 0; j < 8; j++)
{
if (0x80 & temph)
{
zh ^= vh;
zl ^= vl;
}
if (vl & 0x1)
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
vh ^= 0xe100000000000000;
}
else
{
vl = vl >> 1;
if (vh & 0x1) { vl ^= 0x8000000000000000; }
vh = vh >> 1;
}
temph = temph << 1;
}
// get result
for (j = 1; j <= 16 / 2; j++)
{
T[0][i][16 / 2 - j] = (uint8_t)zh;
zh = zh >> 8;
T[0][i][16 - j] = (uint8_t)zl;
zl = zl >> 8;
}
}
otherT(T);
}
/**
* return the value of (output.H) by looking up tables
*/
void multi(uint8_t T[16][256][16], uint8_t *output)
{
uint8_t i, j;
uint8_t temp[16];
for (i = 0; i < 16; i++)
{
temp[i] = output[i];
output[i] = 0;
}
for (i = 0; i < 16; i++)
{
for (j = 0; j < 16; j++)
{
output[j] ^= T[i][*(temp + i)][j];
}
}
}
/*
* a: additional authenticated data
* c: the cipher text or initial vector
*/
void ghash(uint8_t T[16][256][16], uint8_t *add, size_t add_len, uint8_t *cipher, size_t length, uint8_t *output)
{
/* x0 = 0 */
*(uint64_t *)output = 0;
*((uint64_t *)output + 1) = 0;
/* compute with add */
int i = 0;
for (i = 0; i < add_len / 16; i++)
{
*(uint64_t *)output ^= *(uint64_t *)add;
*((uint64_t *)output + 1) ^= *((uint64_t *)add + 1);
add += 16;
multi(T, output);
}
if (add_len % 16)
{
// the remaining add
for (i = 0; i < add_len % 16; i++)
{
*(output + i) ^= *(add + i);
}
multi(T, output);
}
/* compute with cipher text */
for (i = 0; i < length / 16; i++)
{
*(uint64_t *)output ^= *(uint64_t *)cipher;
*((uint64_t *)output + 1) ^= *((uint64_t *)cipher + 1);
cipher += 16;
multi(T, output);
}
if (length % 16)
{
// the remaining cipher
for (i = 0; i < length % 16; i++)
{
*(output + i) ^= *(cipher + i);
}
multi(T, output);
}
/* eor (len(A)||len(C)) */
uint64_t temp_len = (uint64_t)(add_len * 8); // len(A) = (uint64_t)(add_len*8)
for (i = 1; i <= 16 / 2; i++)
{
output[16 / 2 - i] ^= (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
temp_len = (uint64_t)(length * 8); // len(C) = (uint64_t)(length*8)
for (i = 1; i <= 16 / 2; i++)
{
output[16 - i] ^= (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
multi(T, output);
}
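/*
** Minimal host-side sketch of how the GHASH helpers above fit together; ctx, the add/cipher
** buffers and their lengths are placeholders:
**
**   uint8_t zero[16] = { 0 }, H[16], Y[16];
**   static uint8_t T[16][256][16];              // 64 KB table, keep it off the stack
**   sm4_crypt_ecb(&ctx, 16, zero, H);           // H = E_K(0^128)
**   computeTable(T, H);
**   ghash(T, add, add_len, cipher, cipher_len, Y);   // Y = GHASH_H(A, C)
*/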
/*
** This kernel converts data from linear storage layout to tiled (matrix) storage layout
** dev_linear: data blocks stored linearly in global memory
** dev_matrix: data blocks stored in tiled (matrix) form in global memory
*/
__global__ void kernal_linear_to_matrix(\
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_linear[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_matrix[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//stage the data from global memory into shared memory with aligned, coalesced accesses
{
uint32_t *read = (uint32_t *)(dev_linear + dev_offset);
uint32_t *write = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//synchronization point
__syncthreads();
//table-driven layout conversion
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//conversion round 0
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//conversion round 1
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//conversion round 2
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//conversion round 3
write = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
read = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//synchronization point
__syncthreads();
//write the data back from shared memory to global memory with aligned, coalesced accesses
{
uint32_t *write = (uint32_t *)(dev_matrix + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** This kernel converts data from tiled (matrix) storage layout back to linear storage layout
** dev_matrix: data blocks stored in tiled (matrix) form in global memory
** dev_linear: data blocks stored linearly in global memory
*/
__global__ void kernal_matrix_to_linear(\
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_matrix[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_linear[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//stage the data from global memory into shared memory with aligned, coalesced accesses
{
uint32_t *read = (uint32_t *)(dev_matrix + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//synchronization point
__syncthreads();
//table-driven layout conversion
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//conversion round 0
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//conversion round 1
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//conversion round 2
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//conversion round 3
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//synchronization point
__syncthreads();
//write the data back from shared memory to global memory with aligned, coalesced accesses
{
uint32_t *write = (uint32_t *)(dev_linear + dev_offset);
uint32_t *read = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** Encryption kernel for SM4-CTR mode: each thread encrypts one counter value and XORs it
** with its plaintext block to produce the ciphertext
** dev_SboxTable: S-box
** counter: data-block counter
** streamid: stream ID
** dev_input: plaintext input
** dev_output: ciphertext output
*/
__global__ void kernal_enc(uint8_t *const __restrict__ dev_SboxTable, \
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint32_t counter, uint32_t streamid, \
uint8_t dev_input[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_output[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE * 2];
uint8_t *matrix = smem;
uint8_t *linear = smem + 16 * BLOCK_SIZE;
uint8_t *rw_matrix = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
{
uint32_t ulbuf[5];
{
//each thread reads the IV
uint8_t tidCTR[16];
*(uint32_t *)(tidCTR + 0) = *(uint32_t *)(constant_iv + 0);
*(uint32_t *)(tidCTR + 4) = *(uint32_t *)(constant_iv + 4);
*(uint32_t *)(tidCTR + 8) = *(uint32_t *)(constant_iv + 8);
*(uint32_t *)(tidCTR + 12) = counter + (uint32_t)(threadIdx.x + blockIdx.x * blockDim.x + streamid * (PARTICLE_SIZE / STREAM_SIZE / 16));
//*(uint32_t *)(tidCTR + 12) = counter;
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
ulbuf[i] = (((uint32_t)tidCTR[i * 4]) << 24) | \
(((uint32_t)tidCTR[i * 4 + 1]) << 16) | \
(((uint32_t)tidCTR[i * 4 + 2]) << 8) | \
(uint32_t)tidCTR[i * 4 + 3];
}
}
//32 rounds of iteration
{
uint32_t temp;
uint8_t a[4];
uint32_t bb;
#pragma unroll 32
for (int i = 0; i < 32; i++)
{
temp = ulbuf[(i + 1) % 5] ^ ulbuf[(i + 2) % 5] ^ ulbuf[(i + 3) % 5] ^ constant_sk[i];
a[0] = (uint8_t)(temp >> 24);
a[1] = (uint8_t)(temp >> 16);
a[2] = (uint8_t)(temp >> 8);
a[3] = (uint8_t)temp;
a[0] = dev_SboxTable[a[0]];
a[1] = dev_SboxTable[a[1]];
a[2] = dev_SboxTable[a[2]];
a[3] = dev_SboxTable[a[3]];
bb = (((uint32_t)a[0]) << 24) | (((uint32_t)a[1]) << 16) | (((uint32_t)a[2]) << 8) | (uint32_t)a[3];
bb = bb ^ ((bb << 2) | (bb >> 30)) ^ ((bb << 10) | (bb >> 22)) ^ ((bb << 18) | (bb >> 14)) ^ ((bb << 24) | (bb >> 8));
ulbuf[(i + 4) % 5] = ulbuf[(i + 0) % 5] ^ bb;
}
}
{
//store this thread's cipher block at its output address in shared memory (tiled layout)
uint8_t temp[4];
uint8_t *write = rw_matrix;
temp[0] = (uint8_t)(ulbuf[0] >> 24);
temp[1] = (uint8_t)(ulbuf[0] >> 16);
temp[2] = (uint8_t)(ulbuf[0] >> 8);
temp[3] = (uint8_t)ulbuf[0];
*(uint32_t *)(rw_matrix + 0 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[4] >> 24);
temp[1] = (uint8_t)(ulbuf[4] >> 16);
temp[2] = (uint8_t)(ulbuf[4] >> 8);
temp[3] = (uint8_t)ulbuf[4];
*(uint32_t *)(rw_matrix + 1 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[3] >> 24);
temp[1] = (uint8_t)(ulbuf[3] >> 16);
temp[2] = (uint8_t)(ulbuf[3] >> 8);
temp[3] = (uint8_t)ulbuf[3];
*(uint32_t *)(rw_matrix + 2 * 128) = *(uint32_t *)temp;
temp[0] = (uint8_t)(ulbuf[2] >> 24);
temp[1] = (uint8_t)(ulbuf[2] >> 16);
temp[2] = (uint8_t)(ulbuf[2] >> 8);
temp[3] = (uint8_t)ulbuf[2];
*(uint32_t *)(rw_matrix + 3 * 128) = *(uint32_t *)temp;
}
}
//synchronization point
__syncthreads();
//convert the tiled data in shared memory to linear layout
{
uint32_t warpaddr = (threadIdx.x / 32) * (32 * 16);
uint32_t inertid = threadIdx.x % 32;
uint32_t *read;
uint32_t *write;
//conversion round 0
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_zero[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_zero[inertid]);
*write = *read;
//conversion round 1
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_one[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_one[inertid]);
*write = *read;
//conversion round 2
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_two[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_two[inertid]);
*write = *read;
//conversion round 3
read = (uint32_t *)(matrix + warpaddr + dev_matrix_table_three[inertid]);
write = (uint32_t *)(linear + warpaddr + dev_linear_table_three[inertid]);
*write = *read;
}
//synchronization point
__syncthreads();
//read the plaintext with aligned, coalesced accesses, XOR it with the encrypted counter to form the ciphertext, then write the ciphertext back to global memory the same way
{
uint32_t *read = (uint32_t *)(dev_input + dev_offset);
uint32_t *write = (uint32_t *)(dev_output + dev_offset);
uint32_t *cipher = (uint32_t *)(linear + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = (*(read + i * BLOCK_SIZE)) ^ (*(cipher + i * BLOCK_SIZE));
}
}
}
/*
** Galois-field multiply-and-add kernel
** dev_gfmult_table: GF multiplication table
** dev_cipher: ciphertext input (tiled layout)
** dev_gfmult: GF multiplication result (tiled layout)
*/
__global__ void kernal_gfmult(\
uint8_t dev_gfmult_table[16][256][16], \
uint8_t dev_cipher[PARTICLE_SIZE / STREAM_SIZE], \
uint8_t dev_gfmult[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE];
uint8_t *matrix = smem;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//read the ciphertext and the previous GF multiplication result from global memory with
//aligned, coalesced accesses and write their XOR to shared memory;
//at this point the blocks in shared memory are in tiled (matrix) layout.
{
uint32_t *read_cipher = (uint32_t *)(dev_cipher + dev_offset);
uint32_t *read_gfmult = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = (*(read_cipher + i * BLOCK_SIZE)) ^ (*(read_gfmult + i * BLOCK_SIZE));
}
}
//synchronization point
__syncthreads();
//Galois-field multiplication
{
uint8_t *tid_cipher = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint8_t temp;
uint8_t *read;
//scratch buffer for the GF multiplication result
uint8_t tid_gfmult[16];
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) = 0;
}
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
read = tid_cipher + i * (32 * 4);
#pragma unroll 4
for (int j = 0; j < 4; j++)
{
temp = read[j];
#pragma unroll 16
for (int k = 0; k < 16; k++)
{
tid_gfmult[k] ^= dev_gfmult_table[i * 4 + j][temp][k];
}
}
}
//write this block's GF multiplication result back to shared memory
{
uint32_t *write = (uint32_t *)(matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * 32) = *(uint32_t *)(tid_gfmult + i * 4);
}
}
}
//synchronization point
__syncthreads();
//write the multiplication results from shared memory back to global memory with aligned, coalesced accesses; the blocks stay in tiled layout in global memory.
{
uint32_t *write = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
/*
** This kernel computes each thread's final GHASH result
** dev_gfmult_table: GF multiplication table
** dev_gfmult: GF multiplication result
*/
__global__ void kernal_final(\
uint8_t dev_gfmult_table[16][256][16], \
uint32_t dev_matrix_table_zero[32], uint32_t dev_linear_table_zero[32], \
uint32_t dev_matrix_table_one[32], uint32_t dev_linear_table_one[32], \
uint32_t dev_matrix_table_two[32], uint32_t dev_linear_table_two[32], \
uint32_t dev_matrix_table_three[32], uint32_t dev_linear_table_three[32], \
uint8_t dev_gfmult[PARTICLE_SIZE / STREAM_SIZE])
{
__shared__ uint8_t smem[16 * BLOCK_SIZE];
uint8_t *matrix = smem;
uint32_t dev_offset = blockIdx.x * blockDim.x * 16 + threadIdx.x * 4;
uint32_t share_offset = threadIdx.x * 4;
//read the previous GF multiplication results into shared memory with aligned, coalesced accesses
{
uint32_t *read = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *write = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
//synchronization point
__syncthreads();
{
uint8_t *tid_cipher = matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4;
uint8_t temp;
uint8_t *read;
//scratch buffer for the intermediate GF multiplication result
uint8_t tid_gfmult[16];
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) = 0;
}
//table-driven Galois-field multiplication
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
read = tid_cipher + i * (32 * 4);
#pragma unroll 4
for (int j = 0; j < 4; j++)
{
temp = read[j] ^ constant_lenAC[i * 4 + j];
#pragma unroll 16
for (int k = 0; k < 16; k++)
{
tid_gfmult[k] ^= dev_gfmult_table[i * 4 + j][temp][k];
}
}
}
//each thread XORs with ency0 to produce the final tag
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(uint32_t *)(tid_gfmult + i * 4) ^= *(uint32_t *)(constant_ency0 + i * 4);
}
//write this round's GF multiplication result back to shared memory with aligned, coalesced accesses; the blocks are in tiled layout in shared memory
{
uint32_t *write = (uint32_t *)(matrix + (threadIdx.x / 32) * (16 * 32) + (threadIdx.x % 32) * 4);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * 32) = *(uint32_t *)(tid_gfmult + i * 4);
}
}
}
//synchronization point
__syncthreads();
//write the multiplication results from shared memory back to global memory with aligned, coalesced accesses
{
uint32_t *write = (uint32_t *)(dev_gfmult + dev_offset);
uint32_t *read = (uint32_t *)(matrix + share_offset);
#pragma unroll 4
for (int i = 0; i < 4; i++)
{
*(write + i * BLOCK_SIZE) = *(read + i * BLOCK_SIZE);
}
}
}
void Init_device_memory(device_memory *way, uint8_t add[16], uint8_t iv[12])
{
//create the streams
for (int i = 0; i < STREAM_SIZE; i++)
{
cudaStreamCreate(&(way->stream[i]));
}
//initialize the memory for the layout-conversion lookup tables
cudaHostAlloc((void**)&(way->dev_matrix_table_zero), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_matrix_table_zero, matrix_table_zero, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_linear_table_zero), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_linear_table_zero, linear_table_zero, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_matrix_table_one), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_matrix_table_one, matrix_table_one, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_linear_table_one), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_linear_table_one, linear_table_one, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_matrix_table_two), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_matrix_table_two, matrix_table_two, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_linear_table_two), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_linear_table_two, linear_table_two, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_matrix_table_three), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_matrix_table_three, matrix_table_three, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaHostAlloc((void**)&(way->dev_linear_table_three), 32 * sizeof(uint32_t), cudaHostAllocDefault);
cudaMemcpy(way->dev_linear_table_three, linear_table_three, 32 * sizeof(uint32_t), cudaMemcpyHostToDevice);
//copy the round keys to constant memory
cudaMemcpyToSymbol(constant_sk, way->ctx.sk, 32 * sizeof(uint32_t));
//initialize each thread's IV
cudaMemcpyToSymbol(constant_iv, iv, 12);
//initialize the S-box table memory
cudaHostAlloc((void**)&(way->dev_SboxTable), 256, cudaHostAllocDefault);
cudaMemcpy(way->dev_SboxTable, SboxTable, 256, cudaMemcpyHostToDevice);
//allocate the data input buffer
cudaHostAlloc((void**)&(way->dev_input), PARTICLE_SIZE, cudaHostAllocDefault);
//allocate the data output buffer
cudaHostAlloc((void**)&(way->dev_output), PARTICLE_SIZE, cudaHostAllocDefault);
//encrypt the all-zero block
uint8_t y0[16];
uint8_t ency0[16];
memset(y0, 0, 16);
//compute ency0 and copy it to constant memory
sm4_crypt_ecb(&way->ctx, 16, y0, ency0);
cudaMemcpyToSymbol(constant_ency0, ency0, 16);
uint8_t gfmult_table[16][256][16];
//build the GF multiplication lookup table
computeTable(gfmult_table, ency0);
//copy the GF multiplication table to device-visible memory
cudaHostAlloc((void**)&(way->dev_gfmult_table), \
sizeof(gfmult_table), cudaHostAllocDefault);
cudaMemcpy(way->dev_gfmult_table, gfmult_table, \
sizeof(gfmult_table), cudaMemcpyHostToDevice);
//initialize the intermediate GF multiplication result
uint8_t temp[16];
memset(temp, 0, 16);
for (int i = 0; i < 16; i++)
{
temp[i] ^= add[i];
}
multi(gfmult_table, temp);
uint8_t *gfmult_init = (uint8_t *)malloc(PARTICLE_SIZE);
for (int i = 0; i < PARTICLE_SIZE / 16; i++)
{
memcpy(gfmult_init + i * 16, temp, 16);
}
//allocate the GF multiplication output buffer
cudaHostAlloc((void**)&(way->dev_gfmult), \
PARTICLE_SIZE, cudaHostAllocDefault);
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the initial GF multiplication values to device memory
cudaMemcpyAsync(\
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
gfmult_init + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyHostToDevice, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//convert the GF multiplication results in global memory from linear to tiled (matrix) layout
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
cudaStreamSynchronize(way->stream[i]);
}
}
free(gfmult_init);
}
/*
** Host interface function: releases the device memory.
*/
void Free_device_memory(device_memory *way)
{
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
cudaStreamSynchronize(way->stream[i]);
}
//release the buffers
cudaFreeHost(way->dev_gfmult_table);
cudaFreeHost(way->dev_IV);
cudaFreeHost(way->dev_SboxTable);
cudaFreeHost(way->dev_matrix_table_zero);
cudaFreeHost(way->dev_linear_table_zero);
cudaFreeHost(way->dev_matrix_table_one);
cudaFreeHost(way->dev_linear_table_one);
cudaFreeHost(way->dev_matrix_table_two);
cudaFreeHost(way->dev_linear_table_two);
cudaFreeHost(way->dev_matrix_table_three);
cudaFreeHost(way->dev_linear_table_three);
cudaFreeHost(way->dev_input);
cudaFreeHost(way->dev_output);
cudaFreeHost(way->dev_gfmult);
//destroy the streams
for (int i = 0; i < STREAM_SIZE; i++)
{
cudaStreamDestroy(way->stream[i]);
}
}
/*
** Authenticated-encryption host interface function
** counter: encryption block counter
** input: plaintext input
** output: ciphertext output
*/
void sm4_gcm_enc(device_memory *way, uint32_t counter, uint8_t input[PARTICLE_SIZE], uint8_t output[PARTICLE_SIZE])
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the plaintext from host memory to device global memory
cudaMemcpyAsync(\
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
input + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyHostToDevice, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//encrypt the plaintext blocks
kernal_enc << < grid, block, 0, way->stream[i] >> > (way->dev_SboxTable, \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
counter, i, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the encrypted ciphertext blocks from device global memory back to host memory
cudaMemcpyAsync(output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyDeviceToHost, way->stream[i]);
}
/*
for (int i = 0; i < STREAM_SIZE; i++)
{
//convert the ciphertext blocks from linear to tiled layout
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//perform the GF multiply-and-add
kernal_gfmult << < grid, block, 0, way->stream[i] >> > (\
(uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
*/
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
cudaStreamSynchronize(way->stream[i]);
}
}
/*
** Authenticated-decryption host interface function
** counter: encryption block counter
** input: ciphertext input
** output: plaintext output
*/
void sm4_gcm_dec(device_memory *way, uint32_t counter, uint8_t input[PARTICLE_SIZE], uint8_t output[PARTICLE_SIZE])
{
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the ciphertext from host memory to device global memory
cudaMemcpyAsync(\
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
input + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyHostToDevice, way->stream[i]);
}
//convert the ciphertext blocks from linear to tiled layout
for (int i = 0; i < STREAM_SIZE; i++)
{
kernal_linear_to_matrix << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//perform the GF multiply-and-add
kernal_gfmult << < grid, block, 0, way->stream[i] >> > (\
(uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//decrypt the ciphertext blocks
kernal_enc << < grid, block, 0, way->stream[i] >> > (way->dev_SboxTable, \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
counter, i, \
way->dev_input + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy the decrypted plaintext blocks from device global memory back to host memory
cudaMemcpyAsync(output + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_output + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyDeviceToHost, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
cudaStreamSynchronize(way->stream[i]);
}
}
/*
** This host interface function produces the final authentication tag
** length: number of ciphertext blocks
** tag: value-result parameter; the tag is written here when the function returns
*/
void sm4_gcm_final(device_memory *way, uint64_t length, uint8_t tag[PARTICLE_SIZE])
{
uint8_t temp[16];
/* eor (len(A)||len(C)) */
uint64_t temp_len = (uint64_t)(16 * 8); // len(A) = (uint64_t)(add_len*8)
for (int i = 1; i <= 16 / 2; i++)
{
temp[16 / 2 - i] = (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
length = length * 16;
temp_len = (uint64_t)(length * 8); // len(C) = (uint64_t)(length*8)
for (int i = 1; i <= 16 / 2; i++)
{
temp[16 - i] = (uint8_t)temp_len;
temp_len = temp_len >> 8;
}
//initialize (len(A)||len(C))
cudaMemcpyToSymbol(constant_lenAC, temp, 16);
dim3 grid(GRID_SIZE, 1, 1);
dim3 block(BLOCK_SIZE, 1, 1);
for (int i = 0; i < STREAM_SIZE; i++)
{
//compute the final GHASH result
kernal_final << < grid, block, 0, way->stream[i] >> > ((uint8_t(*)[256][16])(way->dev_gfmult_table), \
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//convert the GHASH results in global memory from tiled to linear layout
kernal_matrix_to_linear << < grid, block, 0, way->stream[i] >> > (\
way->dev_matrix_table_zero, way->dev_linear_table_zero, \
way->dev_matrix_table_one, way->dev_linear_table_one, \
way->dev_matrix_table_two, way->dev_linear_table_two, \
way->dev_matrix_table_three, way->dev_linear_table_three, \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE));
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//copy each thread's tag from global memory back to host memory
cudaMemcpyAsync(tag + i * (PARTICLE_SIZE / STREAM_SIZE), \
way->dev_gfmult + i * (PARTICLE_SIZE / STREAM_SIZE), \
PARTICLE_SIZE / STREAM_SIZE, \
cudaMemcpyDeviceToHost, way->stream[i]);
}
for (int i = 0; i < STREAM_SIZE; i++)
{
//synchronize the streams
cudaStreamSynchronize(way->stream[i]);
}
} |
aa67c72c77095647caaa925897b6cf6ad7ea7082.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#include <rocm_smi/rocm_smi.h>
#define IS 5000
#define JS 5000
#define I 50000
#define J 50000
int main(){
/*Create context*/
float* d;
hipMalloc((void**)&d,sizeof(float)*1);
hipFree(d);
FILE *fp;
char *fname = "array_data.csv";
fp = fopen(fname,"w");
int failed_counter = 0;
hipError_t res;
hipArray_t array;
rsmi_status_t nres;
nvmlMemory_t mem;
uint32_t dev;
size_t width = 0,height = 0;
size_t prev_width = 0,prev_height = 0;
struct hipChannelFormatDesc desc;
size_t before_mem = 0;
size_t after_mem = 0;
size_t prev_mem = 0;
desc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
// desc = hipCreateChannelDesc(16,16,16,16,hipChannelFormatKindSigned);
nres = nvmlInit();
if(nres != RSMI_STATUS_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
nres = nvmlDeviceGetHandleByIndex(0,&dev);
if(nres != RSMI_STATUS_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
#if 1
int max = 0 , min = 100000000;
// for(int i = IS ; i < IS+I ; i ++){
for(int i = IS ; i < IS+I ; i += 32){
width = i;
// for(int j = JS ; j < JS+J ; j ++){
for(int j = JS ; j < JS+J ; j += 32){
height = j;
nres = nvmlDeviceGetMemoryInfo(dev,&mem);
if(nres != RSMI_STATUS_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
before_mem = mem.free;
res = hipMallocArray(&array,&desc,width,height,0);
if(res == hipSuccess){
nres = nvmlDeviceGetMemoryInfo(dev,&mem);
if(nres != RSMI_STATUS_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
after_mem = mem.free;
size_t used;
size_t expected;
used = before_mem-after_mem;
expected = ((desc.w+desc.x+desc.y+desc.z+7)/8)
*((width+31)&~(size_t)31)
*((height+127)&~(size_t)127)
+ (2<<20);
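						/* expected model: bytes per element (channel bits rounded up to whole bytes),
						   times the width padded to a multiple of 32 elements, times the height padded
						   to a multiple of 128 rows, plus a fixed 2 MiB allowance */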
if(expected < used){
printf("expected : %lu\n",expected);
printf("used : %lu\n",used);
printf("width : %lu\n",width);
printf("height : %lu\n",height);
exit(-1);
}
if(min > expected-used){
min = expected-used;
}
if(max < expected-used){
max = expected-used;
}
printf("%lu\t%lu\t%lu\t%lu\t%lu\n",expected-used,min,max,width,height);
/*
fprintf(fp,"%lu",used);
if(j < J-1){
fprintf(fp,",");
}else{
fprintf(fp,"\n");
}
*/
/*
size_t expected;
if(before_mem-after_mem != expected){
printf("Failed\n");
printf("width : %lu\n",width);
printf("height : %lu\n",height);
printf("width*height : %lu\n",width*height);
printf("used : %lu\n",before_mem-after_mem);
printf("be expected as : %lu\n",expected);
if(++failed_counter >= 10)
exit(-1);
}else{
// printf("PASS(%lu,%lu:%lu)",width,height,expected);
}
size_t e;
e = ((width-1)/32+1)*131072+((height-1)/128+1)*524288;
expected = e > 2097152 ? e : 2097152;
if(expected-used > max){
max = expected-used;
}
if(min > (int)expected-(int)used){
min = expected-used;
}
printf("max:%d\tmin:%d\tused:%lu\texpected:%lu\twidth:%d\theight:%d\n",max,min,used,expected,width,height);
if(min < 0){
printf("Detected min < 0\n");
printf("width:%d,height:%d,used:%d,expected:%d\n",width,height,used,expected);
exit(-1);
}
// size_t left = used/w-used/2048;
// size_t right = -960+1966080/w;
if(left != right){
printf("\t%lu\t%lu\n",left,right);
exit(-1);
}
*/
if((before_mem-after_mem) > prev_mem){
/*
printf("width : %lu\n",width);
printf("height : %lu\n",height);
printf("used : %lu\n",before_mem-after_mem);
printf("diff(width) : %lu\n",width-prev_width);
printf("diff(height) : %lu\n",height-prev_height);
printf("diff : %lu\n",(before_mem-after_mem)-prev_mem);
printf("diff(width*height) : %lu\n",width*height-prev_width*prev_height);
printf("(used-2097152)/131072: %f\n",(float)(before_mem-after_mem-2097152)/(float)131072);
*/
prev_mem = before_mem-after_mem;
prev_width = width;
prev_height = height;
}
hipFreeArray(array);
}else{
printf("\nERROR : %d\n",res);
printf("Failed Counter : %d\n",failed_counter);
exit(-1);
}
}
}
#endif
return 0;
}
| aa67c72c77095647caaa925897b6cf6ad7ea7082.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <nvml.h>
#define IS 5000
#define JS 5000
#define I 50000
#define J 50000
int main(){
/*Create context*/
float* d;
cudaMalloc((void**)&d,sizeof(float)*1);
cudaFree(d);
FILE *fp;
char *fname = "array_data.csv";
fp = fopen(fname,"w");
int failed_counter = 0;
cudaError_t res;
cudaArray_t array;
nvmlReturn_t nres;
nvmlMemory_t mem;
nvmlDevice_t dev;
size_t width = 0,height = 0;
size_t prev_width = 0,prev_height = 0;
struct cudaChannelFormatDesc desc;
size_t before_mem = 0;
size_t after_mem = 0;
size_t prev_mem = 0;
desc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
// desc = cudaCreateChannelDesc(16,16,16,16,cudaChannelFormatKindSigned);
nres = nvmlInit();
if(nres != NVML_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
nres = nvmlDeviceGetHandleByIndex(0,&dev);
if(nres != NVML_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
#if 1
int max = 0 , min = 100000000;
// for(int i = IS ; i < IS+I ; i ++){
for(int i = IS ; i < IS+I ; i += 32){
width = i;
// for(int j = JS ; j < JS+J ; j ++){
for(int j = JS ; j < JS+J ; j += 32){
height = j;
nres = nvmlDeviceGetMemoryInfo(dev,&mem);
if(nres != NVML_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
before_mem = mem.free;
res = cudaMallocArray(&array,&desc,width,height,0);
if(res == cudaSuccess){
nres = nvmlDeviceGetMemoryInfo(dev,&mem);
if(nres != NVML_SUCCESS){
printf("NVML ERROR : %d\n",nres);
exit(-1);
}
after_mem = mem.free;
size_t used;
size_t expected;
used = before_mem-after_mem;
expected = ((desc.w+desc.x+desc.y+desc.z+7)/8)
*((width+31)&~(size_t)31)
*((height+127)&~(size_t)127)
+ (2<<20);
if(expected < used){
printf("expected : %lu\n",expected);
printf("used : %lu\n",used);
printf("width : %lu\n",width);
printf("height : %lu\n",height);
exit(-1);
}
if(min > expected-used){
min = expected-used;
}
if(max < expected-used){
max = expected-used;
}
printf("%lu\t%lu\t%lu\t%lu\t%lu\n",expected-used,min,max,width,height);
/*
fprintf(fp,"%lu",used);
if(j < J-1){
fprintf(fp,",");
}else{
fprintf(fp,"\n");
}
*/
/*
size_t expected;
if(before_mem-after_mem != expected){
printf("Failed\n");
printf("width : %lu\n",width);
printf("height : %lu\n",height);
printf("width*height : %lu\n",width*height);
printf("used : %lu\n",before_mem-after_mem);
printf("be expected as : %lu\n",expected);
if(++failed_counter >= 10)
exit(-1);
}else{
// printf("PASS(%lu,%lu:%lu)",width,height,expected);
}
size_t e;
e = ((width-1)/32+1)*131072+((height-1)/128+1)*524288;
expected = e > 2097152 ? e : 2097152;
if(expected-used > max){
max = expected-used;
}
if(min > (int)expected-(int)used){
min = expected-used;
}
printf("max:%d\tmin:%d\tused:%lu\texpected:%lu\twidth:%d\theight:%d\n",max,min,used,expected,width,height);
if(min < 0){
printf("Detected min < 0\n");
printf("width:%d,height:%d,used:%d,expected:%d\n",width,height,used,expected);
exit(-1);
}
// size_t left = used/w-used/2048;
// size_t right = -960+1966080/w;
if(left != right){
printf("\t%lu\t%lu\n",left,right);
exit(-1);
}
*/
if((before_mem-after_mem) > prev_mem){
/*
printf("width : %lu\n",width);
printf("height : %lu\n",height);
printf("used : %lu\n",before_mem-after_mem);
printf("diff(width) : %lu\n",width-prev_width);
printf("diff(height) : %lu\n",height-prev_height);
printf("diff : %lu\n",(before_mem-after_mem)-prev_mem);
printf("diff(width*height) : %lu\n",width*height-prev_width*prev_height);
printf("(used-2097152)/131072: %f\n",(float)(before_mem-after_mem-2097152)/(float)131072);
*/
prev_mem = before_mem-after_mem;
prev_width = width;
prev_height = height;
}
cudaFreeArray(array);
}else{
printf("\nERROR : %d\n",res);
printf("Failed Counter : %d\n",failed_counter);
exit(-1);
}
}
}
#endif
return 0;
}
|
5441905b4563c5e8144b30e95136f52fd7a07626.hip | // !!! This is a file automatically generated by hipify!!!
// reco based on template.cu
// cuda stuff
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <helper_math.h>
#include <hip/device_functions.h>
// generic stuff
#include "mystuff.h"
#include "aoptions.h"
// pet stuff
#include "F120_long_defs.h"
#include "cudapet.h"
#include "lors.h"
#include "smatrix.h"
#include "reco.h"
#include "reco_kernels.h"
// windows stuff
#include <algorithm>
#include <Windows.h>
int roibox_cut(int x, int y);
int compute_forward(SMfull &sm, float *voxvals, float *tsum);
int bigjob(SMfull &sm,AOptions &opt);
double timePassed(LARGE_INTEGER &StartingTime);
int show_full_tsum(float *tsum);
int simple_check(SMfull &sm,AOptions &opt,float *tsum,int nx,int ny,int sector);
int dump_sm(SMfull &sm,AOptions &opt);
int do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap);
int cuda_do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap);
int cyl_fill(float *vox,AOptions &opt,int nxy,int nz,double dxy);
int const_fill(float *vox,AOptions &opt,int nxy,int nz,float val);
int setup_cuda_sm(SMfull &sm,cudaSM &host_sm,cudaSM **dev_sm_out);
int setup_cuda_vmap(VoxMap &vm);
template <typename T> int swizzle_buffer(T *a,int n1,int n2,int n3,int m1,int m2,int m3);
int map_swizz(char *name_in, char *name_out);
int map_chop(char *name_in, char *name_out);
int make_dc(int c1,int c2);
int host_do_forward_project(SMfull &sm,float *voxval,float *zdzmap);
int do_mlem(SMfull&sm, AOptions &opt);
template <typename T> int make_buffers(T **buf_out,T **dev_buf_out, size_t len, char *tag);
template <typename T> int read_buffers(char *name, int len, T *h_buf, T *d_buf,T rescale);
template <typename T> int copy_buffer_to(int len, T *h_buf, T *d_buf);
template <typename T> int copy_buffer_from(int len, T *h_buf, T *d_buf);
template <typename T> int clear_buffer(int len, T *d_buf);
int cyl_buffer_fill_normalized(float *vox,double val);
int do_forward(SMfull &sm,char *vol_in,char *zmap_out);
int do_backward(SMfull &sm,char *zmap_in,char *vol_out);
char *smfile ="D:\\data\\syseff.raw";
char *tvfile ="D:\\data\\big_tveff.raw"; //changed back 13/11/17
char *mini_smfile ="D:\\data\\smbigger.raw";
//char *mini_tvfile ="D:\\data\\tvbigger2.raw"; //changed 01/11/17
char *mini_tvfile ="D:\\data\\tvbigger_szk.raw"; //changed 29/11/17
int main(int argc, char *argv[])
{
LARGE_INTEGER StartingTime;
QueryPerformanceCounter(&StartingTime);
if (argc < 2){
printf("Reco - PET reconstuction with complete system matrix\n",F120_ID);
printf("sysmat:filename system matix (default %s)\n",smfile);
printf("mlem MLEM reco, same as OSEM:1\n");
printf("osem:n OSEM n subsets, n=1,2,4 or 8 supported\n");
printf("maxit:n [24/n] Max full OMEM passes\n");
printf("dzcut:val max dz [47 i.e. all]\n");
printf("sector:val use single sector [-1 i.e, all]\n");
printf("one_x:sx use single voxel sx sy [default use all]\n");
printf("one_y:sy set sy [defaults to sx which must be set]\n");
printf("cylfill use cylinder for activity\n");
printf("cylrad cylinder radius [23.0]\n");
printf("cyltest or cyltestpr write active volume and exit, add pr to print\n");
printf("ones file ROI with 1.0f\n");
printf("voxval write active volume\n");
printf("tsum write full tsum dataset\n");
printf("ones fill ROI voxels with 1.0f\n");
printf("mapsum write tsum summed over slices\n");
printf("cuda use cuda!\n");
printf("minivox use small voxel defaults\n");
printf("mapswizz <fin> <fout> convert full map to swizz form\n");
printf("mapchop <fin> <fout> extract mid 48 z-range from 96 map\n");
printf("doforward <vol in> <zdzmap out> do one forward projection\n");
printf("dobackward <zdzmap in> <vol out> do one backward projection\n");
return 0;
}
char cudalog_name[] = "D:\\logfiles\\cudareco.log";
FILE *logfile = fopen(cudalog_name,"a");
if (!logfile) {
logfile = fopen(cudalog_name,"w");
if (logfile) printf("new %s logfile created\n",cudalog_name);
}
if (!logfile) { printf("can't open %s",cudalog_name); return 1; }
fprintf(logfile,"cudareco %s version 2.0 args: ",F120_ID);
for (int k=0; k<argc; k++) fprintf(logfile," %s",argv[k]);
fprintf(logfile,"\n");
AOptions opt(argc,argv,1);
// misc quick options here before open system matrix
if (opt.isset("mapchop")){
int k= opt.isset("mapchop");
return map_chop(argv[k+1], argv[k+2]);
}
if (opt.isset("mapswizz")){
int k= opt.isset("mapswizz");
return map_swizz(argv[k+1], argv[k+2]);
}
char sm_name[256];
if (opt.isset("sysmat")) strcpy(sm_name, opt.get_string("sysmat"));
else if (opt.isset("minivox")) strcpy(sm_name,mini_smfile);
else strcpy(sm_name,smfile);
SMfull sm(sm_name);
if (sm.numlors <10) return 1;
if (opt.isset("doforward"))
{
int k=opt.isset("doforward");
return do_forward(sm,argv[k+1],argv[k+2]);
}
if (opt.isset("dobackward"))
{
int k=opt.isset("dobackward");
return do_backward(sm,argv[k+1],argv[k+2]);
}
if (opt.isset("mlem") || opt.isset("osem")){
return do_mlem(sm,opt);
}
if (opt.isset("cudatest")) {
cudaSM *dev_sm;
cudaSM host_sm;
setup_cuda_sm(sm,host_sm,&dev_sm);
hipLaunchKernelGGL(( check_sm), dim3(1),dim3(16), 0, 0, dev_sm);
return 0;
}
//printf("A\n");
if (opt.isset("dump")) dump_sm(sm,opt);
else bigjob(sm,opt);
if (opt.isset("cuda")){
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) { printf("Failed to deinitialize the cuda device?? error=%s\n",hipGetErrorString(cudaStatus)); }
}
printf("Total time %.3f secs\n",timePassed(StartingTime));
fclose(logfile);
return 0;
}
int do_forward(SMfull &sm,char *vol_in,char *zmap_out)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
printf("Running MLEM!\n");
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
hipError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride; //zdz format
int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins;
// input activity volume (#voxels) read from external file
float *vol = NULL;
float *dev_vol = NULL;
if (make_buffers<float>(&vol,&dev_vol,nvox,"vol")) return 1;
if (read_buffers<float>(vol_in,nvox,vol,dev_vol,1.0f)) return 1;
// forward projection (#lors) init with zeros
float *zdzmap = NULL;
float *dev_zdzmap = NULL;
if (make_buffers<float>(&zdzmap,&dev_zdzmap,nlors,"zdzmap")) return 1;
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,nvox,"teffs")) return 1;
if (read_buffers<float>(tvfile,nvox,teffs,dev_teffs,1.0f)) return 1;
// sm for device
if (setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if (setup_cuda_vmap(vm)) return 1;
// one fp step
for (int kv=0; kv<sm.voxels; kv++){
// current activity => lor lors
//forward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv,0); //TODO fix this for osem
// forward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv,1);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"forward_proj kernel error: [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
printf("forward projection time %.3f secs\n",timePassed(CudaTime));
hipDeviceSynchronize();
if (copy_buffer_from<float>(nlors,zdzmap,dev_zdzmap))return 1;
write_raw<float>(zmap_out,zdzmap,nlors);
if (vol) free(vol);
if (dev_vol) hipFree(dev_vol);
if (zdzmap) free(zdzmap);
if (dev_zdzmap) hipFree(dev_zdzmap);
return 0;
}
int do_backward(SMfull &sm,char *zmap_in,char *vol_out)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
printf("Running MLEM!\n");
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
hipError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride; //zdz format
int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins;
// output activity volume (#voxels) init with zeros
float *vol = NULL;
float *dev_vol = NULL;
if (make_buffers<float>(&vol,&dev_vol,nvox,"vol")) return 1;
// input projection data (#lors) read from external file
float *zdzmap = NULL;
float *dev_zdzmap = NULL;
if (make_buffers<float>(&zdzmap,&dev_zdzmap,nlors,"zdzmap")) return 1;
if (read_buffers<float>(zmap_in,nlors,zdzmap,dev_zdzmap,1.0f)) return 1;
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,nvox,"teffs")) return 1;
if (read_buffers<float>(tvfile,nvox,teffs,dev_teffs,1.0f)) return 1;
// sm for device
if (setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if (setup_cuda_vmap(vm)) return 1;
// one bp step
for (int kv = 0; kv<sm.voxels; kv++){
// backward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv); // TODO fix this for OSEM
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"backward_proj kernel error: [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
printf("backward projection time %.3f secs\n",timePassed(CudaTime));
hipDeviceSynchronize();
if (copy_buffer_from<float>(nvox,vol,dev_vol))return 1;
write_raw<float>(vol_out,vol,nvox);
if (vol) free(vol);
if (dev_vol) hipFree(dev_vol);
if (zdzmap) free(zdzmap);
if (dev_zdzmap) hipFree(dev_zdzmap);
return 0;
}
// osem added 28/10/17
int do_mlem(SMfull &sm, AOptions &opt)
{
LARGE_INTEGER CudaTime;
LARGE_INTEGER FpTime; float Fpsum = 0.0f;
LARGE_INTEGER BpTime; float Bpsum = 0.0f;
LARGE_INTEGER VfTime; //float Vfsum = 0.0f;
LARGE_INTEGER LfTime; //float Lfsum = 0.0f;
LARGE_INTEGER RunTime;
QueryPerformanceCounter(&CudaTime);
QueryPerformanceCounter(&RunTime);
int osem = opt.set_from_opt("osem",1);
//if (osem !=1){ printf("sorry osem is broken at the moment - using mlem\n"); osem = 1; }
if(osem==1)printf("Running MLEM!\n");
else printf("Running OSEM %d subsets!\n",osem);
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
hipError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride;
//int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins; //this for cartesian 128*128*95
int nvox = F120_SZKsize; //this for szk 8*95*1661
int big_nvox = nvox*(1+2+4+8); // TODO just store required subsets?
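// the factor (1+2+4+8) presumably packs one efficiency image per subset for each supported OSEM
// subset count (1, 2, 4 and 8 - see the osem:n help text), stored back to back in the teffs file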
// measured activity (#lors) init from external file
float *meas = NULL;
float *dev_meas = NULL;
if (make_buffers<float>(&meas,&dev_meas,nlors,"meas")) return 1;
if (read_buffers<float>("measured.raw",nlors,meas,dev_meas,1.0f)) return 1;
// forward projection (#lors) init with zeros
float *fproj = NULL;
float *dev_fproj = NULL;
if (make_buffers<float>(&fproj,&dev_fproj,nlors,"fproj")) return 1;
// backward projection (#voxels) init with zeros
float *bproj = NULL;
float *dev_bproj = NULL;
if (make_buffers<float>(&bproj,&dev_bproj,nvox,"bproj")) return 1;
// estimated activity (#voxels) init using measured lors (maybe use bp not uniform)
float *act = NULL;
float *dev_act = NULL;
if (make_buffers<float>(&act,&dev_act,nvox,"act")) return 1; // this clears dev buffer
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,big_nvox,"teffs")) return 1;
if (opt.isset("minivox")){ if(read_buffers<float>(mini_tvfile,big_nvox,teffs,dev_teffs,1.0f)) return 1; }
else if (read_buffers<float>(tvfile,big_nvox,teffs,dev_teffs,1.0f)) return 1;
//for (int k=0; k<nvox; k++) teffs[k] = 1.0f;
// sm for device
if(setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if(setup_cuda_vmap(vm)) return 1;
// check for existing roi_start file
if (read_raw_quiet<float>("roi_start.raw", act, nvox)==0){
copy_buffer_to<float>(nvox,act,dev_act);
printf("activity initialised from roi_start.raw\n");
}
else{
// use back projection for initialization of buffer instead of constant filling.
QueryPerformanceCounter(&BpTime);
for (int kv = 0; kv<sm.voxels; kv++){
hipLaunchKernelGGL(( backward_project_faster), dim3(64),dim3(64), 0, 0, dev_sm,dev_act,dev_meas,1,0,kv); // back proj measured lors to activity.
}
hipDeviceSynchronize();
printf("initial bp time %.3f secs\n",timePassed(BpTime));
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"initial backward_proj kernel error: [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
if(copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>("roi_start.raw",act,nvox);
}
char name[256];
int maxit = 24/osem;
if (opt.isset("maxit")) maxit = opt.set_from_opt("maxit",maxit);
char bugname[256];
//return 0;
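// Sketch of the iteration below (assuming lor_factors forms the ratio measured/forward on the lors of the
// current subset and vox_factors applies the backprojected ratio divided by the voxel efficiencies, as the
// kernel names in reco_kernels.h suggest): this is the standard MLEM/OSEM update
//   act_new[v] = act[v]/teff[v] * sum_t SM[t][v] * meas[t] / ( sum_w SM[t][w]*act[w] )
// with t running over the lors of the current osem_set.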
for (int iter=0; iter<maxit; iter++) for (int osem_set = 0; osem_set<osem; osem_set++) {
if (clear_buffer(nlors,dev_fproj)) return 1;
QueryPerformanceCounter(&FpTime);
for (int kv=0; kv<sm.voxels; kv++){
// current activity => lor lors
//forward_project_faster<64><<<64,64>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,0);
hipLaunchKernelGGL(( forward_project_faster<128>), dim3(64),dim3(128), 0, 0, dev_sm,dev_act,dev_fproj,osem,osem_set,kv,0);
//cudaStatus = hipGetLastError();
//if (cudaStatus != hipSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s] kv %d even 0\n",iter,hipGetErrorString(cudaStatus),kv); return 1; }
//forward_project_faster<64><<<64,64>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,1);
hipLaunchKernelGGL(( forward_project_faster<128>), dim3(64),dim3(128), 0, 0, dev_sm,dev_act,dev_fproj,osem,osem_set,kv,1);
//cudaStatus = hipGetLastError();
//if (cudaStatus != hipSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s] kv %d even 1\n",iter,hipGetErrorString(cudaStatus),kv); return 1; }
}
hipDeviceSynchronize();
printf("fp time %.3f secs\n",timePassed(FpTime));
Fpsum += (float)timePassed(FpTime);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s]\n",iter,hipGetErrorString(cudaStatus)); return 1; }
if (0){
hipDeviceSynchronize();
sprintf(bugname,"fpdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nlors,fproj,dev_fproj))return 1;
hipDeviceSynchronize();
write_raw<float>(bugname,fproj,nlors);
}
QueryPerformanceCounter(&LfTime);
hipLaunchKernelGGL(( lor_factors) , dim3(F120_DCsize),dim3(256), 0, 0, dev_meas,dev_fproj,osem,osem_set);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"lor_factors kernel error it %d: [%s]\n",iter,hipGetErrorString(cudaStatus)); return 1; }
//hipDeviceSynchronize();
//printf("lf time %.3f secs\n",timePassed(LfTime));
// Lfsum += timePassed(LfTime);
if (0){
hipDeviceSynchronize();
sprintf(bugname,"lfdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nlors,fproj,dev_fproj))return 1;
hipDeviceSynchronize();
write_raw<float>(bugname,fproj,nlors);
}
QueryPerformanceCounter(&BpTime);
for (int kv = 0; kv<sm.voxels; kv++){
hipLaunchKernelGGL(( backward_project_faster), dim3(64),dim3(64), 0, 0, dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv);
//backward_project_faster2<<<64,64>>>(dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv,0); // back proj measured lors to activity.
//backward_project_faster2<<<64,64>>>(dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv,1); // back proj measured lors to activity.
}
hipDeviceSynchronize();
printf("bp time %.3f secs\n",timePassed(BpTime));
Bpsum += (float)timePassed(BpTime);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"backward_proj kernel error it %d: [%s]\n",iter,hipGetErrorString(cudaStatus)); return 1; }
if (0){
hipDeviceSynchronize();
sprintf(bugname,"bpdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nvox,bproj,dev_bproj))return 1;
hipDeviceSynchronize();
write_raw<float>(bugname,bproj,nvox);
}
QueryPerformanceCounter(&VfTime);
hipLaunchKernelGGL(( vox_factors), dim3(128),dim3(256), 0, 0, dev_teffs,dev_act,dev_bproj,osem,osem_set);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"vox_factors kernel error it %d: [%s]\n",iter,hipGetErrorString(cudaStatus)); return 1; }
//hipDeviceSynchronize();
//printf("vf time %.3f secs\n",timePassed(VfTime));
// VFsum += timePassed(VfTime);
clear_buffer<float>(nlors,dev_fproj); // BUG fix 16/10/17
clear_buffer<float>(nvox,dev_bproj);
// that's it!
hipDeviceSynchronize();
if (maxit < 6 || (iter+1)%5==0 || iter+1 == maxit){
if (osem==1){
sprintf(name,"mlem%2.2d.raw",iter+1);
if (copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>(name,act,nvox);
}
else if (osem_set==osem-1){
sprintf(name,"osem%2.2d_subset%2.2d_iter%2.2d.raw",osem,osem_set+1,iter+1);
if (copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>(name,act,nvox);
}
}
}
if (meas) free(meas);
if (dev_meas) hipFree(dev_meas);
if (fproj) free(fproj);
if (dev_fproj) hipFree(dev_fproj);
if (bproj) free(bproj);
if (dev_bproj) hipFree(dev_bproj);
if (act) free(act);
if (dev_act) hipFree(dev_act);
if (teffs) free(teffs);
if (dev_teffs) hipFree(dev_teffs);
printf("total times mlem %.3f, fp %.3f, bp %.3f secs\n",timePassed(RunTime),Fpsum,Bpsum);
return 0;
}
int setup_cuda_vmap(VoxMap &vm)
{
// sector constants to global device memory
int *map = vm.amap_x();
hipError_t cudaStatus;
cudaStatus = hipMemcpyToSymbol(dev_map8_x,map,24*sizeof(int));
if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_x failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
map = vm.amap_y();
cudaStatus = hipMemcpyToSymbol(dev_map8_y,map,24*sizeof(int));
if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_y failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
map = vm.amap_c();
cudaStatus = hipMemcpyToSymbol(dev_map8_c,map,16*sizeof(int));
if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_c failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
return 0;
}
int setup_cuda_sm(SMfull &sm,cudaSM &host_sm,cudaSM **dev_sm_out)
{
// allocate actual sm buffer on device
cudaSM *dev_sm = NULL;
hipError_t cudaStatus = hipMalloc((void**)&dev_sm,sizeof(cudaSM));
if (cudaStatus != hipSuccess) { printf("hipMalloc dev_sm failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
// mirror copy of dev_sm on host
//cudaSM host_sm;
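// host_sm is a host-side mirror of the device struct: its v and lors members are filled with *device*
// pointers allocated below, then the whole struct is copied to dev_sm so kernels see valid device addresses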
host_sm.voxels = sm.voxels;
cudaStatus = hipMalloc((void**)&host_sm.v,sm.voxels*sizeof(SMfull_vox));
if (cudaStatus != hipSuccess) { printf("hipMalloc for sm.voxels failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
// copy voxels to device pointer
cudaStatus = hipMemcpy(host_sm.v,sm.v,sm.voxels*sizeof(SMfull_vox),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { printf("hipMemcpy to sm.v failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
host_sm.numlors = sm.numlors;
cudaStatus = hipMalloc((void**)&host_sm.lors,sm.numlors*sizeof(smlor));
if (cudaStatus != hipSuccess) { printf("hipMalloc for sm.lors failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
// copy lors to device pointer
cudaStatus = hipMemcpy(host_sm.lors,sm.lors,sm.numlors*sizeof(smlor),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { printf("hipMemcpy to sm.lors failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
// copy struct to device
cudaStatus = hipMemcpy(dev_sm,&host_sm,sizeof(cudaSM),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { printf("hipMemcpy to dev_sm failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
*dev_sm_out = dev_sm;
return 0;
}
int dump_sm(SMfull &sm,AOptions &opt)
{
int nx = opt.set_from_opt("nx",64);
int ny = opt.set_from_opt("ny",64);
quad p;
for (int kv = 0; kv < sm.voxels; kv++){
if (sm.v[kv].nx != nx || sm.v[kv].ny != ny) continue;
printf("found voxel %d %d at kv=%d with lors = %d\n",nx,ny,kv,sm.v[kv].lors);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
uint key = sm.key(kv,kl);
small_lor_from(key,p);
printf("%5d (%2d %3d)-(%2d %3d) %9.5f %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,sm.val(kv,kl,0),sm.val(kv,kl,1));
}
}
return 0;
}
double box_bit(double r,float3 &p)
{
// return the area under the arc for x in [p.x, p.y], measured above the box base y = p.z
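// note: with theta = asin(x/r) the standard antiderivative of sqrt(r*r - x*x) is (r*r/2)*(theta + 0.5*sin(2*theta)),
// which is exactly the bracketed 'area' expression below before the base rectangle p.z*(b-a) is removed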
double a = p.x;
double b = p.y;
double theta_a = asin(a/r);
double theta_b = asin(b/r);
double area = r*r*0.5*(theta_b - theta_a + 0.5*(sin(2.0*theta_b)-sin(2.0*theta_a)));
area -= p.z*(b-a); // remove contribution below p.z
return area;
}
double box_in_circle(double r,float4 &p)
{
// float4 format {x0,y0,dx,dy}
// |
// case 0 none inside | b-----d
// case 1 just (a) inside | | |
// case 2 both (a) & (b) inside | | |
// case 3 both (a) & (c) inside | a-----c
// case 4 (a) (b) & (c) inside |
// case 5 all inside 0-------------------
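// for the first-quadrant boxes this is called with (x,y >= 0), corner a is nearest the origin and d farthest,
// so the cases reduce to testing which of a, b, c, d lie inside radius r (d inside => all inside, a outside => none)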
float xa = p.x;
float ya = p.y;
double ra = sqrt(xa*xa+ya*ya);
float xb = xa;
float yb = ya+p.w;
double rb = sqrt(xb*xb+yb*yb);
float xc = xa+p.z;
float yc = ya;
double rc = sqrt(xc*xc+yc*yc);
float xd = xc;
float yd = yb;
double rd =sqrt(xd*xd+yd*yd);
if (rd < r ) return p.z*p.w; // inside: easy case 5;
else if (ra >= r) return 0.0; // outside: easy case 0;
else if (rb > r && rc > r) { // a inside: case 1
float xh = (float)sqrt(r*r-ya*ya);
float3 q ={ xa,xh,ya };
return box_bit(r,q);
}
else if (rb < r && rc > r) { // a & b inside: case 2
float xl = (float)sqrt(r*r-yb*yb);
float xh = (float)sqrt(r*r-ya*ya);
float3 q ={ xl,xh,ya };
return box_bit(r,q)+(xl-xa)*(yb-ya);
}
else if (rb > r && rc < r) { // a & c inside: case 3
float3 q ={ xa,xc,ya };
return box_bit(r,q);
}
else if (rb < r && rc < r) { // a, b & c inside: case 4
float xl = (float)sqrt(r*r-yb*yb);
float3 q ={ xl,xc,ya };
return box_bit(r,q) +(xl-xa)*(yb-ya);
}
else printf("unexpected case in box_in_circle p %f %f %f r %f\n",p.x,p.y,p.z,r);
return 0.0;
}
int cyl_fill(float *vox,AOptions &opt,int nxy,int nz,double dxy)
{
double r = opt.set_from_opt("cylrad",F120_XYBin*nxy);
printf("cyl_fill for radius %8.3f nxy %d nz %d\n",r,nxy,nz);
int stride = nxy*nxy;
for (int k=0; k<stride*nz;k++) vox[k] = 0;
int mx = nxy/2;
int my = nxy/2;
float4 p = { 0.0f,0.0f,(float)dxy,(float)dxy };
for (int kx=0; kx<nxy/2; kx++) {
p.x = (float)dxy*kx;
for (int ky=0; ky<nxy/2; ky++){
p.y = (float)dxy*ky;
double val = box_in_circle(r,p)/(dxy*dxy); //normalize to unity per voxel
if(val >0.0 && opt.isset("cyltestpr"))printf("%2d %2d newval %9.5f\n",kx,ky,val);
double dist = sqrt(p.x*p.x + p.y*p.y);
if (dist <= F120_Rmin/sqrt(2.0)){
vox[nxy*(my+ky) +mx+kx] = (float)val;
vox[nxy*(my-ky-1)+mx+kx] = (float)val;
vox[nxy*(my+ky) +mx-kx-1] = (float)val;
vox[nxy*(my-ky-1)+mx-kx-1] = (float)val;
}
}
}
if (opt.isset("cylrange")){
int j = opt.isset("cylrange");
int z0 = opt.iopt(j+1);
int z1 = max(1,z0);
int z2 = opt.iopt(j+2);
for (int z=z1; z<=z2; z++) for (int k=0; k<stride; k++) vox[z*stride+k] = vox[k];
if (z0>0) for (int k=0; k<stride; k++) vox[k] = 0;
printf("cyl z range limited to %d-%d\n",z0,z2);
}
else for (int z=1; z<nz; z++) for (int k=0; k<stride; k++) vox[z*stride+k] = vox[k];
if (opt.isset("cyltest")){
write_raw("cylvox.raw",vox,stride*nz);
return 1;
}
return 0;
}
int const_fill(float *vox,AOptions &opt,int nxy,int nz,float val)
{
int stride = nxy*nxy;
for (int k=0; k<stride*nz; k++) vox[k] = 0.0f;
for (int x=0; x<nxy; x++) for (int y=0; y<nxy; y++) if (roibox_cut(x,y)){
for (int z=0; z<nz; z++) vox[stride*z+(y*nxy+x)]= val;
}
write_raw<float>("cfill_check.raw",vox,stride*nz); // debug
return 0;
}
int bigjob(SMfull &sm,AOptions &opt)
{
int zstride = F120_NXYbins*F120_NXYbins;
float *voxval = mycalloc<float>(zstride*F120_NZbins,"voxval");
if (!voxval)return 1;
if (opt.isset("cylfill")) if(cyl_fill(voxval,opt,F120_NXYbins,F120_NZbins,F120_XYBin)) return 0;
else if (opt.isset("ones")) const_fill(voxval,opt,F120_NXYbins,F120_NZbins,1.0f);
else voxval[47*zstride+64*F120_NXYbins+64] =1.0f; // TODO better phantoms needed!!
// full size lor map here
int stride = F120_NXY*F120_TrueNZ;
float *tsum = NULL;
if (opt.isset("tsum") || opt.isset("mapsum")){
tsum = mycalloc<float>(stride*stride,"tsum/smap");
if (!tsum) return 1;
}
// Compact lor map here. NB Z size based on real detector not long version
float *zdzmap = NULL;
if (opt.isset("zdzmap")){
zdzmap = mycalloc<float>(F120_DZstride*F120_DCstride,"zdzmap");
if (!zdzmap) return 1;
}
//if (compute_forward(sm,voxval,tsum)) return 1;
if (opt.isset("simple")){ // actually this is brocken
int nx = opt.set_from_opt("nx",100);
int ny = opt.set_from_opt("ny",70);
int sector = opt.set_from_opt("sector",-1);
if (simple_check(sm,opt,tsum,nx,ny,sector)) return 1;
}
else if (opt.isset("cuda")) {
//LARGE_INTEGER CudaTime;
//QueryPerformanceCounter(&CudaTime);
cuda_do_forward_projection(sm,opt,voxval,tsum,zdzmap);
//printf("Cuda time %.3f secs\n",timePassed(CudaTime));
}
else {
LARGE_INTEGER ForwardTime;
QueryPerformanceCounter(&ForwardTime);
// do_forward_projection(sm,opt,voxval,tsum,zdzmap);
host_do_forward_project(sm,voxval,zdzmap);
printf("Host Forward time %.3f secs\n",timePassed(ForwardTime));
}
LARGE_INTEGER IOTime;
QueryPerformanceCounter(&IOTime);
if(opt.isset("voxval")) write_raw<float>("voxval.raw",voxval,zstride*F120_NZbins);
if (opt.isset("tsum")) write_raw<float>("tsum.raw",tsum,stride*stride);
if (opt.isset("zdzmap") && !opt.isset("cuda")) {
write_raw<float>("host_small.raw",zdzmap,F120_DZstride*F120_DCstride);
swizzle_buffer(zdzmap,145,288,1176,288,1,288*145);
write_raw<float>("host_swizz.raw",zdzmap,F120_DZstride*F120_DCstride);
}
if (opt.isset("mapsum")){
for (int k = 1; k < stride; k++) for (int j=0;j<stride;j++) tsum[j] += tsum[stride*k+j];
write_raw<float>("mapsum.raw",tsum,stride);
}
//show_full_tsum(tsum);
if(!opt.isset("cuda")) printf("IO time %.3f secs\n",timePassed(IOTime));
if (zdzmap) free(zdzmap);
if (tsum) free(tsum);
if (voxval) free(voxval);
return 0;
}
int make_dc(int c1,int c2)
{
int dc = abs(c2-c1);
if (c1 > c2) dc = F120_NXY-dc; // fix logically negative dc values
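// worked example, assuming F120_NXY == 288 as used elsewhere in this file: c1=280, c2=5 gives abs(c2-c1)=275
// and, since c1>c2, dc = 288-275 = 13; the function then returns 13-F120_DCmin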
//if (dc < F120_DCmin || dc > F120_DCmax) return -1; this check now done in cull program
return dc-F120_DCmin;
}
template <typename T> int swizzle_buffer(T *a,int nz,int ny,int nx,int mz,int my,int mx)
{
// reformat an array with dims [nz,ny,nx] into a new in-place layout given by output strides [mz,my,mx] (a permutation of the original axes)
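// example (the call made from bigjob and cuda_do_forward_projection): swizzle_buffer(zdzmap,145,288,1176, 288,1,288*145)
// sends element (z,y,x) of the [145][288][1176] input to j = z*288 + y + x*288*145, i.e. the buffer is
// rewritten in place as a [1176][145][288] array with x as the slowest index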
int size = nz*ny*nx;
T *b = (T *)malloc(size*sizeof(T));
if (!b) return 1;
for (int k=0; k<size; k++) b[k] = a[k];
for (int z=0; z<nz; z++) for (int y = 0; y<ny; y++) for (int x=0; x<nx; x++){
int k = (z*ny+y)*nx+x;
int j =z*mz+y*my+x*mx;
a[j] = b[k];
}
free(b);
return 0;
}
int cuda_do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
hipError_t cudaStatus;
// sm for device
if(setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if(setup_cuda_vmap(vm)) return 1;
// sector constants to global device memory
//int *map = vm.amap_x();
//cudaStatus = hipMemcpyToSymbol(dev_map8_x,map,24*sizeof(int));
//if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_x failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
//map = vm.amap_y();
//cudaStatus = hipMemcpyToSymbol(dev_map8_y,map,24*sizeof(int));
//if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_y failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
//map = vm.amap_c();
//cudaStatus = hipMemcpyToSymbol(dev_map8_c,map,16*sizeof(int));
//if (cudaStatus != hipSuccess) { printf("hipMemcpyToSymbol map8_c failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
// big buffers for device
int zstride = F120_NXYbins*F120_NXYbins;
float *dev_voxval = NULL;
cudaStatus = hipMalloc((void**)&dev_voxval,zstride*F120_NZbins*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMalloc dev_voxval failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
cudaStatus = hipMemcpy(dev_voxval,voxval,zstride*F120_NZbins*sizeof(float),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy to corners failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
float *dev_tsum = NULL;
int stride = F120_NXY*F120_TrueNZ;
cudaStatus = hipMalloc((void**)&dev_tsum,stride*stride*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMalloc dev_tsum failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
cudaStatus = hipMemset(dev_tsum,0,stride*stride*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMemset to dev_tsum failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
float *dev_zdzmap = NULL;
cudaStatus = hipMalloc((void**)&dev_zdzmap,F120_DZstride*F120_DCstride*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMalloc dev_zdzmap failed [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
cudaStatus = hipMemset(dev_zdzmap,0,F120_DZstride*F120_DCstride*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMemset to dev_zdzmap failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
hipDeviceSynchronize();
printf("Cuda setup time %.3f secs\n",timePassed(CudaTime));
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,zstride*F120_NZbins,"teffs")) return 1;
if (read_buffers<float>(tvfile,zstride*F120_NZbins,teffs,dev_teffs,1.0f)) return 1;
QueryPerformanceCounter(&CudaTime);
printf("here we cuda go...\n");
// first do forward projection
for (int kv=0; kv<sm.voxels; kv++){
//if (opt.isset("evenfaster")) {
// forward_project_even_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv);
//}
if (opt.isset("faster")) {
// forward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv,0);
// forward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv,1);
}
else {
// forward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,0);
// forward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"forward_project kernel error: [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
}
hipDeviceSynchronize();
printf("Cuda forward kernel time %.3f secs\n",timePassed(CudaTime));
QueryPerformanceCounter(&CudaTime);
// clear device buffer first!!!
cudaStatus = hipMemset(dev_voxval,0,zstride*F120_NZbins*sizeof(float));
if (cudaStatus != hipSuccess) { printf("hipMemset to dev_voxval failed [%s]",hipGetErrorString(cudaStatus)); return 1; }
// then do backward
if (opt.isset("backproj")) for (int kv=0; kv<sm.voxels; kv++){
if (opt.isset("bfast")) {
// backward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv);
//backward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
else{
//backward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,0);
//backward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) { fprintf(stderr,"back_project kernel error: [%s]\n",hipGetErrorString(cudaStatus)); return 1; }
}
hipDeviceSynchronize();
printf("Cuda backward kernel time %.3f secs\n",timePassed(CudaTime));
QueryPerformanceCounter(&CudaTime);
cudaStatus = hipMemcpy(zdzmap,dev_zdzmap,F120_DZstride*F120_DCstride*sizeof(float),hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy to zdzmap failed: %s\n",hipGetErrorString(cudaStatus)); return cudaStatus; }
write_raw<float>("cuda_small.raw",zdzmap,F120_DZstride*F120_DCstride);
// zo yo xo z0->yn yo->xn xo->zn
swizzle_buffer(zdzmap,145,288,1176, 288, 1, 288*145);
write_raw<float>("cuda_swizz.raw",zdzmap,F120_DZstride*F120_DCstride);
if (opt.isset("backproj")){
cudaStatus = hipMemcpy(voxval,dev_voxval,zstride*F120_NZbins*sizeof(float),hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy to voxval failed: %s\n",hipGetErrorString(cudaStatus)); return cudaStatus; }
write_raw<float>("cuda_backp.raw",voxval,zstride*F120_NZbins);
}
hipDeviceSynchronize();
printf("Cuda IO end time time %.3f secs\n",timePassed(CudaTime));
if (dev_zdzmap) hipFree(dev_zdzmap);
if (dev_tsum) hipFree(dev_tsum);
if (dev_voxval) hipFree(dev_voxval);
if (host_sm.lors) hipFree(host_sm.lors);
if (host_sm.v) hipFree(host_sm.v);
return 0;
}
// this version modeled on cuda kernel!
int host_do_forward_project(SMfull &sm,float *voxval,float *zdzmap)
{
hex p;
hex q;
VoxMap vm;
for (int kv =0; kv<sm.voxels; kv++){
if(kv==sm.voxels-1) printf("kv %d\n",kv);
else printf("kv %d\r",kv);
int nlors = sm.v[kv].lors;
p.x = sm.v[kv].nx;
p.y = sm.v[kv].ny;
//int vox_xypos = p.y*F120_NXYbins+p.x;
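// the flattened loop below mirrors the CUDA kernel decomposition: ids = kl%16 selects one of 8 sectors (ids%8)
// and whether the mirrored lor is used (ids>7); idl = kl/16 is the lor index and 'even' picks the z-parity value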
for (int kl=0; kl<16*nlors;kl++) for(int even=0;even<2;even++){
int ids = kl%16;
int idl = kl/16;
uint key = sm.key(kv,idl);
small_lor_from(key,p);
float val = sm.val(kv,idl,even);
if (p.x==p.y) val *= 0.5f; // fix for diagonal bug in sysmat - present in all sectors thus counted twice in code below.
if (val >0.0f){
int dz = p.z2-p.z1;
vm.hex_to_sector(ids%8,p,q);
if (ids>7) mirror(q,F120_TrueNZ-1+even);
int vox_xypos = q.y*F120_NXYbins+q.x;
//int zsm_offset = 0;
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum
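// derivation: the z-profile stores one block of length (48-d) for each d = 0..dz-1 ahead of this dz, so
// offset = sum_{d=0}^{dz-1} (48-d) = 48*dz - dz*(dz-1)/2 = dz*(97-dz)/2 (assuming F120_TrueNZ == 48, as the loop below implies)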
for (int sz=0; sz<F120_TrueNZ-dz; sz++){ // zloop (odd)
int z1 = sz;
int vzq = F120_TrueNZ-1+even - 2*(q.z1-z1);
vzq = min(F120_NZbins-1,max(0,vzq));
float qval = voxval[vzq*F120_NXYstride+vox_xypos];
int dcq = make_dc(q.c1,q.c2);
zdzmap[(q.c1+dcq*F120_NXY)*F120_DZstride+(sz+zsm_offset)] += val*qval;
}
}
}
}
return 0;
}
int host_do_backward_projection(SMfull &sm,float *voxval,float *zdzmap)
{
//
// Evaluate the numerator sum: Sum[v] = sum over lors t of SM[t][v] * T[t], for each voxel v
//
// use 32 threads per lor each handles primary or mirror and even and odd
//int id = threadIdx.x + blockIdx.x*blockDim.x;
//int idt = id/32; // lor to process same for 8 threads per item
//int ids = id%32; // which tread am i within 8 thread set: lor 8 sectors x 2 for proper & mirror
//int even = (id%32)/16; // 0 or 1
//int idstep = blockDim.x*gridDim.x/32;
hex p;
hex q;
VoxMap vm;
for (int kv =0; kv<sm.voxels; kv++){
int nlors = sm.v[kv].lors;
p.x = sm.v[kv].nx;
p.y = sm.v[kv].ny;
//if (ids==0 && kv == 0 )printf("idt %d nlors %d\n",idt,nlors);
float div[2];
div[0] = 1.0f/sm.v[kv].geff[0];
div[1] = 1.0f/sm.v[kv].geff[1];
//int vox_xypos = p.y*F120_NXYbins+p.x;
for (int kl=0; kl<16*nlors; kl++) for (int even=0; even<2; even++){
int ids = kl%16; // sector+ mirror sector
int idl = kl/16;
uint key = sm.key(kv,idl);
lor_from(key,p);
float val = sm.val(kv,idl,even);
if (p.x==p.y) val *= 0.5f;
//val *= 0.000001f;
val *= div[even];
if (val >0.0f){
int dz = p.z2-p.z1;
vm.hex_to_sector((ids/2)%8,p,q);
if (ids%2) mirror(q,F120_TrueNZ-1+even);
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum
int dcq = make_dc(q.c1,q.c2);
int dcq_offset = (q.c1+dcq*F120_NXY)*F120_DZstride;
//int dcq_offset = 10;
//if (id <32 && kv==75) printf("%3d (%3d %3d %3d) %5d %5d\n",ids,q.c1,q.c2,dcq,dcq_offset,zsm_offset);
int sz_max = F120_TrueNZ-dz;
for (int sz=0; sz<sz_max; sz++){ // zloop (odd)
int vzq = F120_TrueNZ-1+even - 2*(q.z1-sz);
vzq = min(F120_NZbins-1,max(0,vzq));
float tqval = zdzmap[dcq_offset+(sz+zsm_offset)];
voxval[vzq*F120_NXYstride+q.y*F120_NXYbins+q.x] += tqval*val;
}
}
}
}
return 0;
}
int map_chop(char *name_in, char *name_out)
{
uint *map = mymalloc<uint>(F120_STride, "swizz in"); // single slice of full dataset
uint *map_bit = map+F120_STride/4;
FILE *fin = fopen(name_in, "rb");
if (!fin) { printf("bad open for %s\n",name_in); return 1; }
FILE *fout = fopen(name_out, "wb");
if (!fout) { printf("bad open for %s\n",name_out); return 1; }
int slice_in = 0;
int slice_out = 0;
for (int z1 = 0; z1<F120_NZ-24; z1++) for (int c1=0;c1<F120_NXY;c1++){
if (fread(map,sizeof(uint),F120_STride,fin) != F120_STride) {printf("bad read for map slice %d\n",slice_in); return 1;}
slice_in++;
if (z1 >= 24) {
fwrite(map_bit,sizeof(uint),F120_STride/2,fout);
slice_out++;
}
}
fclose(fout);
fclose(fin);
printf("map %s chopped to %s s_in %d sout %d\n",name_in,name_out,slice_in,slice_out);
return 0;
}
int map_swizz(char *name_in, char *name_out)
{
uint *map = mymalloc<uint>(F120_STride/2, "swizz in"); // single slice of full dataset
float *zdzmap = mycalloc<float>(F120_DZstride*F120_DCstride,"swizz out ");
if (!zdzmap) return 1;
FILE *fin = fopen(name_in, "rb");
if (!fin) { printf("bad open for %s\n",name_in); return 1; }
int slice = 0;
for (int z1 = 0; z1 < F120_TrueNZ; z1++){
printf("z1 %2d slice %d\n",z1,slice);
for (int c1 = 0; c1 < F120_NXY; c1++){
if (fread(map,sizeof(uint),F120_STride/2,fin) != F120_STride/2) {printf("bad read for map slice %d\n",slice); return 1;}
slice ++;
for (int z2=0;z2<F120_TrueNZ;z2++) for (int c2=0;c2<F120_NXY;c2++){
float val = (float)map[z2*F120_NXY+c2];
if (val > 0.0f){
quad p = {z1,c1,z2,c2};
//p.z1 = z1;
//p.c1 = c1;
//p.z2 = z2;
//p.c2 = c2;
proper_lor(p);
int dz = p.z2-p.z1;
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum from cuda code!!
int dcp = make_dc(p.c1,p.c2);
//if(p.z1==5 && p.z2==15) printf("lor (%2d %3d)-(%2d %3d) val %8.1f dcp %3d dz %2d offset %4d\n",p.z1,p.c1,p.z2,p.c2,val,dcp,dz,zsm_offset);
//if(dcp >= 0 && dcp <F120_DCsize) zdzmap[(p.c1+dcp*F120_NXY)*F120_DZstride+(p.z1+zsm_offset)] += val;
if(dcp >= 0 && dcp <F120_DCsize) zdzmap[(p.z1+zsm_offset)*F120_DCstride +(p.c1+dcp*F120_NXY)] += val;
}
}
}
}
fclose(fin);
write_raw<float>(name_out,zdzmap,F120_DZstride*F120_DCstride);
free(zdzmap);
free(map);
return 0;
}
int simple_check(SMfull &sm,AOptions &opt,float *tsum,int nx,int ny,int sector)
{
// NB Z in [0,47] for crystals and [0,94] for voxels CARE
printf("simple check for %d %d sector %d\n",nx,ny,sector);
quad p;
quad q;
quad m; // mirror of p;
VoxMap vm;
//int bugs = 0;
float sm_val0 = 0.0f;
float sm_val1 = 0.0f;
int stride = F120_NXY*F120_TrueNZ;
int dz_cut = opt.set_from_opt("dzcut",1);
printf("dz_cut = %d\n",dz_cut);
//return 1;
for (int kv = 0; kv < sm.voxels; kv++){
//if (sm.v[kv].nx != nx || sm.v[kv].ny != ny) continue;
//printf("found voxel kv=%d lors = %d\n",kv,sm.v[kv].lors);
printf("kv %d\r",kv);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
uint key = sm.key(kv,kl);
lor_from(key,p);
//if (p.z1 != p.z2) continue; // debug!!!!
if (abs(p.z1-p.z2) > dz_cut) continue; // debug!!!!
sm_val0 = sm.val(kv,kl,0);
sm_val1 = sm.val(kv,kl,1);
int dz = p.z2-p.z1;
for (int s=0; s<8; s++){
if (sector>=0 && s != sector) continue; // sector = -1 does all
vm.quad_to_sector(s,p,q);
mirror(q,m,95);
// TODO recover Z vertex necessary for real FP!!!!! (=47/48-z1)
//if (sm_val0> 0.0f) for (int vz=1; vz<F120_NZbins; vz+=2){ //oddds TODO smart limits here
//
// int z1 = q.z1 + (vz - 95)/2;
// int z2 = q.z2 + (vz - 95)/2;
// int z3 = m.z1 + (vz - 95)/2;
// int z4 = m.z2 + (vz - 95)/2;
if (sm_val0> 0.0f) for (int sz=0; sz<F120_TrueNZ-dz; sz++){
int z1 = sz;
int z2 = sz+dz;
int z3 = sz;
int z4 = sz+dz;
//printf("lor %5d (%2d %3d)-(%2d %3d) -> %2d %2d val %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,z1,z2,sm_val);
//if (z1>=0 && z2<F120_TrueNZ){
tsum[(z1*F120_NXY+q.c1)*stride + z2*F120_NXY+q.c2] += sm_val0;
tsum[(z2*F120_NXY+q.c2)*stride + z1*F120_NXY+q.c1] += sm_val0;
//}
//if (z3>=0 && z4<F120_TrueNZ){
tsum[(z3*F120_NXY+m.c1)*stride + z4*F120_NXY+m.c2] += sm_val0;
tsum[(z4*F120_NXY+m.c2)*stride + z3*F120_NXY+m.c1] += sm_val0;
//}
}
// do evens?
mirror(q,m,96);
//if (sm_val1> 0.0f) for (int vz=0; vz<F120_NZbins; vz+=2){ //evens
// int z1 = q.z1 + (vz - 96)/2;
// int z2 = q.z2 + (vz - 96)/2;
// int z3 = m.z1 + (vz - 96)/2;
// int z4 = m.z2 + (vz - 96)/2;
if (sm_val1> 0.0f) for (int sz=0; sz<F120_TrueNZ-dz; sz++){ //evens
int z1 = sz;
int z2 = sz+dz;
int z3 = sz;
int z4 = sz+dz;
//printf("lor %5d (%2d %3d)-(%2d %3d) -> %2d %2d val %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,z1,z2,sm_val);
//if (z1>=0 && z2 < F120_TrueNZ){
tsum[(z1*F120_NXY+q.c1)*stride + z2*F120_NXY+q.c2] += sm_val1;
tsum[(z2*F120_NXY+q.c2)*stride + z1*F120_NXY+q.c1] += sm_val1;
//}
//if (z3>=0 && z4 < F120_TrueNZ){
tsum[(z3*F120_NXY+m.c1)*stride + z4*F120_NXY+m.c2] += sm_val1;
tsum[(z4*F120_NXY+m.c2)*stride + z3*F120_NXY+m.c1] += sm_val1;
//}
}
} // end s loop
} // end kl loop
} // end kv loop
return 0;
}
int show_full_tsum(float *tsum)
{
int stride = F120_NXY*F120_TrueNZ;
float *smap = mycalloc<float>(stride*stride,"tsum/smap");
if (!smap) return 1;
int zoffset = 0;
for (int z1=0; z1<F120_TrueNZ; z1++) {
for (int z2=z1; z2<F120_TrueNZ; z2++){
for (int c1=0; c1<F120_NXY; c1++) for (int dc=0; dc<F120_DCsize; dc++){
int c2 = c1+dc+F120_DCmin;
int dz = z2-z1;
smap[(z1*F120_NXY+c1)*stride+(z2*F120_NXY+c2)] =tsum[((zoffset+dz)*F120_NXY+c1)*F120_DCsize+dc];
}
}
zoffset += 48-z1;
}
write_raw<float>("tsum_full.raw",smap,stride*stride);
free(smap);
return 0;
}
int compute_forward(SMfull &sm, float *voxval, float *tsum)
{
// NB Z in [0,47] for crystals and [0,94] for voxels CARE
quad p;
quad m0; // odd mirror of p;
quad m1; // even mirror of p;
VoxMap vm;
int bugs = 0;
for (int kv = 0; kv < sm.voxels; kv++){
if (bugs>0)printf("kv = %d\r",kv);
int xv[8];
int yv[8];
int c1[8];
int c2[8];
// set voxel octet - good for all lors
for (int s=0; s<8; s++) vm.xy_to_sector(s,xv[s],yv[s],sm.v[kv].nx,sm.v[kv].ny);
// now loop over lors for this voxel octet
if (bugs>0 && sm.v[kv].nx==64 && sm.v[kv].ny==64){
printf("octet %d:",kv);
for (int s=0; s<8; s++) printf(" (%d %d)",xv[s],yv[s]);
printf("\n");
}
else printf("%d\r",kv);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
//printf("kl=%d\n",kl);
uint key = sm.key(kv,kl);
lor_from(key,p);
float sm_val0 = sm.val(kv,kl,0);
float sm_val1 = sm.val(kv,kl,1);
int dz = p.z2-p.z1;
//if (p.c1 > p.c2) p.c2 += F120_NXY;
int dc = abs(p.c2-p.c1);
if (p.c1 > p.c2) dc = 288-dc; // fix logically negative dc values
if (dc < F120_DCmin || dc > F120_DCmax) continue; // check now done in cull program
dc -= F120_DCmin;
int m0check = mirror(p,m0,95); // posn in long detector needed here
int m1check = mirror(p,m1,96);
if (bugs>0){
printf("kv/l %d %d p: (%2d %3d)-(%2d %3d) m0: (%2d %3d)-(%2d %3d) m1: (%2d %3d)-(%2d %3d) vals %8.5f %8.5f\n",kv,kl,p.z1,p.c1,p.z2,p.c2,m0.z1,m0.c1,m0.z2,m0.c2,m1.z1,m1.c1,m1.z2,m1.c2,sm_val0,sm_val1);
bugs--;
}
int zoffset = 0;
for (int s=0; s<8; s++){
c1[s] = vm.c_to_sector(s,p.c1);
c2[s] = vm.c_to_sector(s,p.c2);
}
int stride = F120_NXY*F120_TrueNZ;
//swim each tube along z-axis of detector starting at z=0 and ending at 47-dz
for (int zt=0; zt<F120_TrueNZ-dz; zt++){
int p0_zv = 95 - 2*(p.z1-zt); // zv generated at zvbin on 47/48 crystal boundary (voxel z=95)
int p1_zv = 96 - 2*(p.z1-zt); // zv generated at zvbin centre of crystal 48 (voxel z=96)
int m0_zv = 95 - 2*(m0.z1-zt); // care bug fix 24/08/17 mirrors keep primary voxel
int m1_zv = 96 - 2*(m1.z1-zt);
if(bugs>0){
printf("zt=%2d raw p %2d %2d, p0 %2d p1 %2d m0 %2d m1 %2d offset %d\n",zt,p.z1,p.z2,p0_zv,p1_zv,m0_zv,m1_zv,zoffset);
bugs--;
}
//if(zv0 < 0 || zv1 < 0) printf("zt=%d z1 %d z2 %d, zv0 %d zv1 %d\n",zt,p.z1,p.z2,zv0,zv1);
for (int s=0; s<8; s++){
if (p0_zv>=0) {
tsum[(zt*F120_NXY+c1[s])*stride + (zt+dz)*F120_NXY+c2[s]] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
tsum[((zt+dz)*F120_NXY+c2[s])*stride + zt*F120_NXY+c1[s]] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
if (m0_zv>=0) {
tsum[(zt*F120_NXY+c1[s])*stride + (zt+dz)*F120_NXY+c2[s]] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
tsum[((zt+dz)*F120_NXY+c2[s])*stride + zt*F120_NXY+c1[s]] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
//if(p0_zv>=0) tsum[((zoffset+dz)*F120_NXY+p.c1 )*F120_DCsize+dc] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(p1_zv>=0) tsum[((zoffset+dz)*F120_NXY+p.c1 )*F120_DCsize+dc] += sm_val1*voxval[(p1_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(m0_zv>=0) tsum[((zoffset+dz)*F120_NXY+m0.c1)*F120_DCsize+dc] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(m1_zv>=0) tsum[((zoffset+dz)*F120_NXY+m1.c1)*F120_DCsize+dc] += sm_val0*voxval[(m1_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
zoffset += (F120_TrueNZ-zt);
}
}
}
printf("\n");
return 0;
}
int roibox_cut(int x, int y)
{
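// keep the voxel if the corner of its (x,y) bin nearest the centre lies inside a circle of radius
// F120_Rmin/sqrt(2) - the same ROI test used by cyl_fill and cyl_buffer_fill_normalized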
double dx = ( abs(((double)x-63.5)) - 0.5 )*F120_XYBin;
double dy = ( abs(((double)y-63.5)) - 0.5 )*F120_XYBin; // corner closest to origin
double limit = (double)F120_Rmin/sqrt(2.0);
double dist = sqrt(dx*dx+dy*dy);
//if (dist <= limit) return 0;
return (dist <= limit) ? 1 : 0;
}
// this for both device and host
template <typename T> int make_buffers(T **buf_out,T **dev_buf_out, size_t len, char *tag)
{
T *buf = (T *)calloc(len,sizeof(T));
if (!buf) { printf("calloc error %s\n",tag); return 1; }
T *dev_buf = NULL;
hipError_t cudaStatus = hipMalloc((void**)&dev_buf,len*sizeof(T));
if (cudaStatus != hipSuccess) { printf("hipMalloc dev_%s failed [%s]\n",tag,hipGetErrorString(cudaStatus)); return 1; }
cudaStatus = hipMemset(dev_buf,0,len*sizeof(T));
if (cudaStatus != hipSuccess) { printf("hipMemset to dev_%s failed [%s]",tag,hipGetErrorString(cudaStatus)); return 1; }
// hairy pointer syntax thanks to cuda
*buf_out = buf;
*dev_buf_out = dev_buf;
return 0;
}
template <typename T> int read_buffers(char *name, int len, T *h_buf, T *d_buf, T rescale)
{
if (read_raw<T>(name,h_buf,len)) return 1;
if (rescale != (T)1.0) for (int k=0; k<len; k++) h_buf[k] *= rescale;
hipError_t cudaStatus = hipMemcpy(d_buf,h_buf,len*sizeof(T),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy from file %s failed: [%s]",name,hipGetErrorString(cudaStatus)); return 1; }
//printf("read_buffers for %s\n",name);
return 0;
}
template <typename T> int copy_buffer_to(int len, T *h_buf, T *d_buf)
{
hipError_t cudaStatus = hipMemcpy(d_buf,h_buf,len*sizeof(T),hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy from host buffer failed: [%s]",hipGetErrorString(cudaStatus)); return 1; }
return 0;
}
template <typename T> int copy_buffer_from(int len, T *h_buf, T *d_buf)
{
hipError_t cudaStatus = hipMemcpy(h_buf,d_buf,len*sizeof(T),hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) { fprintf(stderr,"hipMemcpy to host buffer failed: [%s]",hipGetErrorString(cudaStatus)); return 1; }
return 0;
}
template <typename T> int clear_buffer(int len, T *d_buf)
{
hipError_t cudaStatus = hipMemset(d_buf,0,len*sizeof(T));
if (cudaStatus != hipSuccess) { printf("hipMemset to d_buf failed: [%s]",hipGetErrorString(cudaStatus)); return 1; }
return 0;
}
int cyl_buffer_fill_normalized(float *vox,double val)
{
int stride = F120_NXYbins*F120_NXYbins;
int nvox = stride*F120_NZbins;
for (int k=0; k<nvox;k++) vox[k] = 0;
int count = 0;
for (int ky=0; ky<F120_NXYbins; ky++) for (int kx=0; kx<F120_NXYbins; kx++) if (roibox_cut(kx, ky)){
for(int kz=0;kz<F120_NZbins;kz++) vox[kz*stride+(F120_NXYbins*ky+kx)] = (float)val;
count += F120_NZbins;
}
float nval = (float)val / (float)count;
for (int k = 0; k<nvox; k++) vox[k] /= (float)(count);
printf("buffer set to %.5e in ROI of %d voxels\n",nval,count);
write_raw<float>("roi_start.raw",vox,nvox);
return 0;
}
// returns the time in seconds between when the input argument was initialised and this call
double timePassed(LARGE_INTEGER &StartingTime)
{
LARGE_INTEGER EndingTime;
QueryPerformanceCounter(&EndingTime);
LARGE_INTEGER Frequency;
LARGE_INTEGER ElapsedMicroseconds;
QueryPerformanceFrequency(&Frequency);
ElapsedMicroseconds.QuadPart = EndingTime.QuadPart - StartingTime.QuadPart;
ElapsedMicroseconds.QuadPart *= 1000000;
ElapsedMicroseconds.QuadPart /= Frequency.QuadPart;
double timesec = 0.000001*ElapsedMicroseconds.QuadPart;
return timesec;
} | 5441905b4563c5e8144b30e95136f52fd7a07626.cu | // reco based on template.cu
// cuda stuff
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <helper_math.h>
#include <device_functions.h>
// generic stuff
#include "mystuff.h"
#include "aoptions.h"
// pet stuff
#include "F120_long_defs.h"
#include "cudapet.h"
#include "lors.h"
#include "smatrix.h"
#include "reco.h"
#include "reco_kernels.h"
// windows stuff
#include <algorithm>
#include <Windows.h>
int roibox_cut(int x, int y);
int compute_forward(SMfull &sm, float *voxvals, float *tsum);
int bigjob(SMfull &sm,AOptions &opt);
double timePassed(LARGE_INTEGER &StartingTime);
int show_full_tsum(float *tsum);
int simple_check(SMfull &sm,AOptions &opt,float *tsum,int nx,int ny,int sector);
int dump_sm(SMfull &sm,AOptions &opt);
int do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap);
int cuda_do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap);
int cyl_fill(float *vox,AOptions &opt,int nxy,int nz,double dxy);
int const_fill(float *vox,AOptions &opt,int nxy,int nz,float val);
int setup_cuda_sm(SMfull &sm,cudaSM &host_sm,cudaSM **dev_sm_out);
int setup_cuda_vmap(VoxMap &vm);
template <typename T> int swizzle_buffer(T *a,int n1,int n2,int n3,int m1,int m2,int m3);
int map_swizz(char *name_in, char *name_out);
int map_chop(char *name_in, char *name_out);
int make_dc(int c1,int c2);
int host_do_forward_project(SMfull &sm,float *voxval,float *zdzmap);
int do_mlem(SMfull&sm, AOptions &opt);
template <typename T> int make_buffers(T **buf_out,T **dev_buf_out, size_t len, char *tag);
template <typename T> int read_buffers(char *name, int len, T *h_buf, T *d_buf,T rescale);
template <typename T> int copy_buffer_to(int len, T *h_buf, T *d_buf);
template <typename T> int copy_buffer_from(int len, T *h_buf, T *d_buf);
template <typename T> int clear_buffer(int len, T *d_buf);
int cyl_buffer_fill_normalized(float *vox,double val);
int do_forward(SMfull &sm,char *vol_in,char *zmap_out);
int do_backward(SMfull &sm,char *zmap_in,char *vol_out);
char *smfile ="D:\\data\\syseff.raw";
char *tvfile ="D:\\data\\big_tveff.raw"; //changed back 13/11/17
char *mini_smfile ="D:\\data\\smbigger.raw";
//char *mini_tvfile ="D:\\data\\tvbigger2.raw"; //changed 01/11/17
char *mini_tvfile ="D:\\data\\tvbigger_szk.raw"; //changed 29/11/17
int main(int argc, char *argv[])
{
LARGE_INTEGER StartingTime;
QueryPerformanceCounter(&StartingTime);
if (argc < 2){
printf("Reco - PET reconstuction with complete system matrix\n",F120_ID);
printf("sysmat:filename system matix (default %s)\n",smfile);
printf("mlem MLEM reco, same as OSEM:1\n");
printf("osem:n OSEM n subsets, n=1,2,4 or 8 supported\n");
printf("maxit:n [24/n] Max full OMEM passes\n");
printf("dzcut:val max dz [47 i.e. all]\n");
printf("sector:val use single sector [-1 i.e, all]\n");
printf("one_x:sx use single voxel sx sy [default use all]\n");
printf("one_y:sy set sy [defaults to sx which must be set]\n");
printf("cylfill use cylinder for activity\n");
printf("cylrad cylinder radius [23.0]\n");
printf("cyltest or cyltestpr write active volume and exit, add pr to print\n");
printf("ones file ROI with 1.0f\n");
printf("voxval write active volume\n");
printf("tsum write full tsum dataset\n");
printf("ones fill ROI voxels with 1.0f\n");
printf("mapsum write tsum summed over slices\n");
printf("cuda use cuda!\n");
printf("minivox use small voxel defaults\n");
printf("mapswizz <fin> <fout> convert full map to swizz form\n");
printf("mapchop <fin> <fout> extract mid 48 z-range from 96 map\n");
printf("doforward <vol in> <zdzmap out> do one forward projection\n");
printf("dobackward <zdzmap in> <vol out> do one backward projection\n");
return 0;
}
char cudalog_name[] = "D:\\logfiles\\cudareco.log";
FILE *logfile = fopen(cudalog_name,"a");
if (!logfile) {
logfile = fopen(cudalog_name,"w");
if (logfile) printf("new %s logfile created\n",cudalog_name);
}
if (!logfile) { printf("can't open %s",cudalog_name); return 1; }
fprintf(logfile,"cudareco %s version 2.0 args: ",F120_ID);
for (int k=0; k<argc; k++) fprintf(logfile," %s",argv[k]);
fprintf(logfile,"\n");
AOptions opt(argc,argv,1);
// misc quick options here before open system matrix
if (opt.isset("mapchop")){
int k= opt.isset("mapchop");
return map_chop(argv[k+1], argv[k+2]);
}
if (opt.isset("mapswizz")){
int k= opt.isset("mapswizz");
return map_swizz(argv[k+1], argv[k+2]);
}
char sm_name[256];
if (opt.isset("sysmat")) strcpy(sm_name, opt.get_string("sysmat"));
else if (opt.isset("minivox")) strcpy(sm_name,mini_smfile);
else strcpy(sm_name,smfile);
SMfull sm(sm_name);
if (sm.numlors <10) return 1;
if (opt.isset("doforward"))
{
int k=opt.isset("doforward");
return do_forward(sm,argv[k+1],argv[k+2]);
}
if (opt.isset("dobackward"))
{
int k=opt.isset("dobackward");
return do_backward(sm,argv[k+1],argv[k+2]);
}
if (opt.isset("mlem") || opt.isset("osem")){
return do_mlem(sm,opt);
}
if (opt.isset("cudatest")) {
cudaSM *dev_sm;
cudaSM host_sm;
setup_cuda_sm(sm,host_sm,&dev_sm);
check_sm<<<1,16>>>(dev_sm);
return 0;
}
//printf("A\n");
if (opt.isset("dump")) dump_sm(sm,opt);
else bigjob(sm,opt);
if (opt.isset("cuda")){
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) { printf("Failed to deinitialize the cuda device?? error=%s\n",cudaGetErrorString(cudaStatus)); }
}
printf("Total time %.3f secs\n",timePassed(StartingTime));
fclose(logfile);
return 0;
}
int do_forward(SMfull &sm,char *vol_in,char *zmap_out)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
printf("Running MLEM!\n");
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
cudaError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride; //zdz format
int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins;
// input activity volume (#voxels) read from external file
float *vol = NULL;
float *dev_vol = NULL;
if (make_buffers<float>(&vol,&dev_vol,nvox,"vol")) return 1;
if (read_buffers<float>(vol_in,nvox,vol,dev_vol,1.0f)) return 1;
// forward projection (#lors) init with zeros
float *zdzmap = NULL;
float *dev_zdzmap = NULL;
if (make_buffers<float>(&zdzmap,&dev_zdzmap,nlors,"zdzmap")) return 1;
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,nvox,"teffs")) return 1;
if (read_buffers<float>(tvfile,nvox,teffs,dev_teffs,1.0f)) return 1;
// sm for device
if (setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if (setup_cuda_vmap(vm)) return 1;
// one fp step
for (int kv=0; kv<sm.voxels; kv++){
// current activity => lor lors
//forward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv,0); //TODO fix this for osem
// forward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv,1);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"forward_proj kernel error: [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
printf("forward projection time %.3f secs\n",timePassed(CudaTime));
cudaDeviceSynchronize();
if (copy_buffer_from<float>(nlors,zdzmap,dev_zdzmap))return 1;
write_raw<float>(zmap_out,zdzmap,nlors);
if (vol) free(vol);
if (dev_vol) cudaFree(dev_vol);
if (zdzmap) free(zdzmap);
if (dev_zdzmap) cudaFree(dev_zdzmap);
return 0;
}
int do_backward(SMfull &sm,char *zmap_in,char *vol_out)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
printf("Running MLEM!\n");
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
cudaError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride; //zdz format
int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins;
// output activity volume (#voxels) init with zeros
float *vol = NULL;
float *dev_vol = NULL;
if (make_buffers<float>(&vol,&dev_vol,nvox,"vol")) return 1;
// input projection data (#lors) read from external file
float *zdzmap = NULL;
float *dev_zdzmap = NULL;
if (make_buffers<float>(&zdzmap,&dev_zdzmap,nlors,"zdzmap")) return 1;
if (read_buffers<float>(zmap_in,nlors,zdzmap,dev_zdzmap,1.0f)) return 1;
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,nvox,"teffs")) return 1;
if (read_buffers<float>(tvfile,nvox,teffs,dev_teffs,1.0f)) return 1;
// sm for device
if (setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if (setup_cuda_vmap(vm)) return 1;
// one bp step
for (int kv = 0; kv<sm.voxels; kv++){
// backward_project_faster<<<64,64>>>(dev_sm,dev_vol,dev_zdzmap,dev_teffs,kv); // TODO fix this for OSEM
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"backward_proj kernel error: [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
printf("backward projection time %.3f secs\n",timePassed(CudaTime));
cudaDeviceSynchronize();
if (copy_buffer_from<float>(nvox,vol,dev_vol))return 1;
write_raw<float>(vol_out,vol,nvox);
if (vol) free(vol);
if (dev_vol) cudaFree(dev_vol);
if (zdzmap) free(zdzmap);
if (dev_zdzmap) cudaFree(dev_zdzmap);
return 0;
}
// osem added 28/10/17
int do_mlem(SMfull &sm, AOptions &opt)
{
LARGE_INTEGER CudaTime;
LARGE_INTEGER FpTime; float Fpsum = 0.0f;
LARGE_INTEGER BpTime; float Bpsum = 0.0f;
LARGE_INTEGER VfTime; //float Vfsum = 0.0f;
LARGE_INTEGER LfTime; //float Lfsum = 0.0f;
LARGE_INTEGER RunTime;
QueryPerformanceCounter(&CudaTime);
QueryPerformanceCounter(&RunTime);
int osem = opt.set_from_opt("osem",1);
//if (osem !=1){ printf("sorry osem is broken at the moment - using mlem\n"); osem = 1; }
if(osem==1)printf("Running MLEM!\n");
else printf("Running OSEM %d subsets!\n",osem);
//system matrix
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
cudaError_t cudaStatus;
int nlors = F120_NXY * F120_DCsize * F120_DZstride;
//int nvox = F120_NXYbins * F120_NXYbins * F120_NZbins; //this for cartesian 128*128*95
int nvox = F120_SZKsize; //this for szk 8*95*1661
int big_nvox = nvox*(1+2+4+8); // TODO just store required subsets?
// measured activity (#lors) init from external file
float *meas = NULL;
float *dev_meas = NULL;
if (make_buffers<float>(&meas,&dev_meas,nlors,"meas")) return 1;
if (read_buffers<float>("measured.raw",nlors,meas,dev_meas,1.0f)) return 1;
// forward projection (#lors) init with zeros
float *fproj = NULL;
float *dev_fproj = NULL;
if (make_buffers<float>(&fproj,&dev_fproj,nlors,"fproj")) return 1;
// backward projection (#voxels) init with zeros
float *bproj = NULL;
float *dev_bproj = NULL;
if (make_buffers<float>(&bproj,&dev_bproj,nvox,"bproj")) return 1;
// estimated activity (#voxels) init using measured lors (maybe use bp not uniform)
float *act = NULL;
float *dev_act = NULL;
if (make_buffers<float>(&act,&dev_act,nvox,"act")) return 1; // this clears dev buffer
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,big_nvox,"teffs")) return 1;
if (opt.isset("minivox")){ if(read_buffers<float>(mini_tvfile,big_nvox,teffs,dev_teffs,1.0f)) return 1; }
else if (read_buffers<float>(tvfile,big_nvox,teffs,dev_teffs,1.0f)) return 1;
//for (int k=0; k<nvox; k++) teffs[k] = 1.0f;
// sm for device
if(setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if(setup_cuda_vmap(vm)) return 1;
// check for existing roi_start file
if (read_raw_quiet<float>("roi_start.raw", act, nvox)==0){
copy_buffer_to<float>(nvox,act,dev_act);
printf("activity initialised from roi_start.raw\n");
}
else{
// use back projection for initialization of buffer instead of constant filling.
QueryPerformanceCounter(&BpTime);
for (int kv = 0; kv<sm.voxels; kv++){
backward_project_faster<<<64,64>>>(dev_sm,dev_act,dev_meas,1,0,kv); // back proj measured lors to activity.
}
cudaDeviceSynchronize();
printf("initial bp time %.3f secs\n",timePassed(BpTime));
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"initial backward_proj kernel error: [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
if(copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>("roi_start.raw",act,nvox);
}
char name[256];
int maxit = 24/osem;
if (opt.isset("maxit")) maxit = opt.set_from_opt("maxit",maxit);
char bugname[256];
//return 0;
for (int iter=0; iter<maxit; iter++) for (int osem_set = 0; osem_set<osem; osem_set++) {
if (clear_buffer(nlors,dev_fproj)) return 1;
QueryPerformanceCounter(&FpTime);
for (int kv=0; kv<sm.voxels; kv++){
// current activity => lor lors
//forward_project_faster<64><<<64,64>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,0);
forward_project_faster<128><<<64,128>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,0);
//cudaStatus = cudaGetLastError();
//if (cudaStatus != cudaSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s] kv %d even 0\n",iter,cudaGetErrorString(cudaStatus),kv); return 1; }
//forward_project_faster<64><<<64,64>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,1);
forward_project_faster<128><<<64,128>>>(dev_sm,dev_act,dev_fproj,osem,osem_set,kv,1);
//cudaStatus = cudaGetLastError();
//if (cudaStatus != cudaSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s] kv %d even 1\n",iter,cudaGetErrorString(cudaStatus),kv); return 1; }
}
cudaDeviceSynchronize();
printf("fp time %.3f secs\n",timePassed(FpTime));
Fpsum += (float)timePassed(FpTime);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"forward_project kernel error it %d: [%s]\n",iter,cudaGetErrorString(cudaStatus)); return 1; }
if (0){
cudaDeviceSynchronize();
sprintf(bugname,"fpdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nlors,fproj,dev_fproj))return 1;
cudaDeviceSynchronize();
write_raw<float>(bugname,fproj,nlors);
}
QueryPerformanceCounter(&LfTime);
lor_factors <<<F120_DCsize,256>>>(dev_meas,dev_fproj,osem,osem_set);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"lor_factors kernel error it %d: [%s]\n",iter,cudaGetErrorString(cudaStatus)); return 1; }
//cudaDeviceSynchronize();
//printf("lf time %.3f secs\n",timePassed(LfTime));
// Lfsum += timePassed(LfTime);
if (0){
cudaDeviceSynchronize();
sprintf(bugname,"lfdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nlors,fproj,dev_fproj))return 1;
cudaDeviceSynchronize();
write_raw<float>(bugname,fproj,nlors);
}
QueryPerformanceCounter(&BpTime);
for (int kv = 0; kv<sm.voxels; kv++){
backward_project_faster<<<64,64>>>(dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv);
//backward_project_faster2<<<64,64>>>(dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv,0); // back proj measured lors to activity.
//backward_project_faster2<<<64,64>>>(dev_sm,dev_bproj,dev_fproj,osem,osem_set,kv,1); // back proj measured lors to activity.
}
cudaDeviceSynchronize();
printf("bp time %.3f secs\n",timePassed(BpTime));
Bpsum += (float)timePassed(BpTime);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"backward_proj kernel error it %d: [%s]\n",iter,cudaGetErrorString(cudaStatus)); return 1; }
if (0){
cudaDeviceSynchronize();
sprintf(bugname,"bpdone%2.2d.raw",iter);
if (copy_buffer_from<float>(nvox,bproj,dev_bproj))return 1;
cudaDeviceSynchronize();
write_raw<float>(bugname,bproj,nvox);
}
QueryPerformanceCounter(&VfTime);
vox_factors<<<128,256>>>(dev_teffs,dev_act,dev_bproj,osem,osem_set);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"vox_factors kernel error it %d: [%s]\n",iter,cudaGetErrorString(cudaStatus)); return 1; }
//cudaDeviceSynchronize();
//printf("vf time %.3f secs\n",timePassed(VfTime));
// VFsum += timePassed(VfTime);
clear_buffer<float>(nlors,dev_fproj); // BUG fix 16/10/17
clear_buffer<float>(nvox,dev_bproj);
	// that's it!
cudaDeviceSynchronize();
if (maxit < 6 || (iter+1)%5==0 || iter+1 == maxit){
if (osem==1){
sprintf(name,"mlem%2.2d.raw",iter+1);
if (copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>(name,act,nvox);
}
else if (osem_set==osem-1){
sprintf(name,"osem%2.2d_subset%2.2d_iter%2.2d.raw",osem,osem_set+1,iter+1);
if (copy_buffer_from<float>(nvox,act,dev_act))return 1;
write_raw<float>(name,act,nvox);
}
}
}
if (meas) free(meas);
if (dev_meas) cudaFree(dev_meas);
if (fproj) free(fproj);
if (dev_fproj) cudaFree(dev_fproj);
if (bproj) free(bproj);
if (dev_bproj) cudaFree(dev_bproj);
if (act) free(act);
if (dev_act) cudaFree(dev_act);
if (teffs) free(teffs);
if (dev_teffs) cudaFree(dev_teffs);
printf("total times mlem %.3f, fp %.3f, bp %.3f secs\n",timePassed(RunTime),Fpsum,Bpsum);
return 0;
}
int setup_cuda_vmap(VoxMap &vm)
{
// sector constants to global device memory
int *map = vm.amap_x();
cudaError_t cudaStatus;
cudaStatus = cudaMemcpyToSymbol(dev_map8_x,map,24*sizeof(int));
if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_x failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
map = vm.amap_y();
cudaStatus = cudaMemcpyToSymbol(dev_map8_y,map,24*sizeof(int));
if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_y failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
map = vm.amap_c();
cudaStatus = cudaMemcpyToSymbol(dev_map8_c,map,16*sizeof(int));
if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_c failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
return 0;
}
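// Note on setup_cuda_sm below: cudaSM holds device pointers, so the struct is deep-copied in two
// steps - the pointer members of a host-side mirror (host_sm) are filled with cudaMalloc'd arrays
// and the voxel/lor data copied into them, then the mirror struct itself is memcpy'd into dev_sm
// so kernels can follow sm->v and sm->lors directly on the device.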
int setup_cuda_sm(SMfull &sm,cudaSM &host_sm,cudaSM **dev_sm_out)
{
// allocate actual sm buffer on device
cudaSM *dev_sm = NULL;
cudaError_t cudaStatus = cudaMalloc((void**)&dev_sm,sizeof(cudaSM));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc dev_sm failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
// mirror copy of dev_sm on host
//cudaSM host_sm;
host_sm.voxels = sm.voxels;
cudaStatus = cudaMalloc((void**)&host_sm.v,sm.voxels*sizeof(SMfull_vox));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc for sm.voxels failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
// copy voxels to device pointer
cudaStatus = cudaMemcpy(host_sm.v,sm.v,sm.voxels*sizeof(SMfull_vox),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { printf("cudaMemcpy to sm.v failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
host_sm.numlors = sm.numlors;
cudaStatus = cudaMalloc((void**)&host_sm.lors,sm.numlors*sizeof(smlor));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc for sm.lors failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
// copy lors to device pointer
cudaStatus = cudaMemcpy(host_sm.lors,sm.lors,sm.numlors*sizeof(smlor),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { printf("cudaMemcpy to sm.lors failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
// copy struct to device
cudaStatus = cudaMemcpy(dev_sm,&host_sm,sizeof(cudaSM),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { printf("cudaMemcpy to dev_sm failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
*dev_sm_out = dev_sm;
return 0;
}
int dump_sm(SMfull &sm,AOptions &opt)
{
int nx = opt.set_from_opt("nx",64);
int ny = opt.set_from_opt("ny",64);
quad p;
for (int kv = 0; kv < sm.voxels; kv++){
if (sm.v[kv].nx != nx || sm.v[kv].ny != ny) continue;
printf("found voxel %d %d at kv=%d with lors = %d\n",nx,ny,kv,sm.v[kv].lors);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
uint key = sm.key(kv,kl);
small_lor_from(key,p);
printf("%5d (%2d %3d)-(%2d %3d) %9.5f %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,sm.val(kv,kl,0),sm.val(kv,kl,1));
}
}
return 0;
}
double box_bit(double r,float3 &p)
{
// return area under arc for x [p.x,p.y] within box base p.z
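	// closed form: area = integral_a^b sqrt(r*r - x*x) dx - p.z*(b-a); with theta = asin(x/r)
	// the integral is (r*r/2)*[theta + 0.5*sin(2*theta)] evaluated between a and b, as coded below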
double a = p.x;
double b = p.y;
double theta_a = asin(a/r);
double theta_b = asin(b/r);
double area = r*r*0.5*(theta_b - theta_a + 0.5*(sin(2.0*theta_b)-sin(2.0*theta_a)));
	area -= p.z*(b-a);  // remove contribution below p.z
return area;
}
double box_in_circle(double r,float4 &p)
{
// float4 format {x0,y0,dx,dy}
// |
// case 0 none inside | b-----d
// case 1 just (a) inside | | |
// case 2 both (a) & (b) inside | | |
// case 3 both (a) & (c) inside | a-----c
// case 4 (a) (b) & (c) inside |
// case 5 all inside 0-------------------
float xa = p.x;
float ya = p.y;
double ra = sqrt(xa*xa+ya*ya);
float xb = xa;
float yb = ya+p.w;
double rb = sqrt(xb*xb+yb*yb);
float xc = xa+p.z;
float yc = ya;
double rc = sqrt(xc*xc+yc*yc);
float xd = xc;
float yd = yb;
double rd =sqrt(xd*xd+yd*yd);
if (rd < r ) return p.z*p.w; // inside: easy case 5;
else if (ra >= r) return 0.0; // outside: easy case 0;
else if (rb > r && rc > r) { // a inside: case 1
float xh = (float)sqrt(r*r-ya*ya);
float3 q ={ xa,xh,ya };
return box_bit(r,q);
}
else if (rb < r && rc > r) { // a & b inside: case 2
float xl = (float)sqrt(r*r-yb*yb);
float xh = (float)sqrt(r*r-ya*ya);
float3 q ={ xl,xh,ya };
return box_bit(r,q)+(xl-xa)*(yb-ya);
}
else if (rb > r && rc < r) { // a & c inside: case 3
float3 q ={ xa,xc,ya };
return box_bit(r,q);
}
else if (rb < r && rc < r) { // a, b & c inside: case 4
float xl = (float)sqrt(r*r-yb*yb);
float3 q ={ xl,xc,ya };
return box_bit(r,q) +(xl-xa)*(yb-ya);
}
else printf("unexpected case in box_in_circle p %f %f %f r %f\n",p.x,p.y,p.z,r);
return 0.0;
}
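// cyl_fill (below) builds a uniform cylinder phantom: for each voxel in one quadrant the
// disc/voxel overlap area from box_in_circle is normalised by the voxel area (so fully covered
// voxels are 1.0), mirrored into the other three quadrants, and then replicated along z unless
// the cylrange option restricts the slice range.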
int cyl_fill(float *vox,AOptions &opt,int nxy,int nz,double dxy)
{
double r = opt.set_from_opt("cylrad",F120_XYBin*nxy);
printf("cyl_fill for radius %8.3f nxy %d nz %d\n",r,nxy,nz);
int stride = nxy*nxy;
for (int k=0; k<stride*nz;k++) vox[k] = 0;
int mx = nxy/2;
int my = nxy/2;
float4 p = { 0.0f,0.0f,(float)dxy,(float)dxy };
for (int kx=0; kx<nxy/2; kx++) {
p.x = (float)dxy*kx;
for (int ky=0; ky<nxy/2; ky++){
p.y = (float)dxy*ky;
double val = box_in_circle(r,p)/(dxy*dxy); //normalize to unity per voxel
if(val >0.0 && opt.isset("cyltestpr"))printf("%2d %2d newval %9.5f\n",kx,ky,val);
double dist = sqrt(p.x*p.x + p.y*p.y);
if (dist <= F120_Rmin/sqrt(2.0)){
vox[nxy*(my+ky) +mx+kx] = (float)val;
vox[nxy*(my-ky-1)+mx+kx] = (float)val;
vox[nxy*(my+ky) +mx-kx-1] = (float)val;
vox[nxy*(my-ky-1)+mx-kx-1] = (float)val;
}
}
}
if (opt.isset("cylrange")){
int j = opt.isset("cylrange");
int z0 = opt.iopt(j+1);
int z1 = max(1,z0);
int z2 = opt.iopt(j+2);
for (int z=z1; z<=z2; z++) for (int k=0; k<stride; k++) vox[z*stride+k] = vox[k];
if (z0>0) for (int k=0; k<stride; k++) vox[k] = 0;
printf("cyl z range limited to %d-%d\n",z0,z2);
}
else for (int z=1; z<nz; z++) for (int k=0; k<stride; k++) vox[z*stride+k] = vox[k];
if (opt.isset("cyltest")){
write_raw("cylvox.raw",vox,stride*nz);
return 1;
}
return 0;
}
int const_fill(float *vox,AOptions &opt,int nxy,int nz,float val)
{
int stride = nxy*nxy;
for (int k=0; k<stride*nz; k++) vox[k] = 0.0f;
for (int x=0; x<nxy; x++) for (int y=0; y<nxy; y++) if (roibox_cut(x,y)){
for (int z=0; z<nz; z++) vox[stride*z+(y*nxy+x)]= val;
}
write_raw<float>("cfill_check.raw",vox,stride*nz); // debug
return 0;
}
int bigjob(SMfull &sm,AOptions &opt)
{
int zstride = F120_NXYbins*F120_NXYbins;
float *voxval = mycalloc<float>(zstride*F120_NZbins,"voxval");
if (!voxval)return 1;
	if (opt.isset("cylfill")) { if (cyl_fill(voxval,opt,F120_NXYbins,F120_NZbins,F120_XYBin)) return 0; }  // braces keep the else-chain bound to the cylfill test
	else if (opt.isset("ones")) const_fill(voxval,opt,F120_NXYbins,F120_NZbins,1.0f);
	else voxval[47*zstride+64*F120_NXYbins+64] =1.0f;  // TODO better phantoms needed!!
// full size lor map here
int stride = F120_NXY*F120_TrueNZ;
float *tsum = NULL;
if (opt.isset("tsum") || opt.isset("mapsum")){
tsum = mycalloc<float>(stride*stride,"tsum/smap");
if (!tsum) return 1;
}
// Compact lor map here. NB Z size based on real detector not long version
float *zdzmap = NULL;
if (opt.isset("zdzmap")){
zdzmap = mycalloc<float>(F120_DZstride*F120_DCstride,"zdzmap");
if (!zdzmap) return 1;
}
//if (compute_forward(sm,voxval,tsum)) return 1;
	if (opt.isset("simple")){  // actually this is broken
int nx = opt.set_from_opt("nx",100);
int ny = opt.set_from_opt("ny",70);
int sector = opt.set_from_opt("sector",-1);
if (simple_check(sm,opt,tsum,nx,ny,sector)) return 1;
}
else if (opt.isset("cuda")) {
//LARGE_INTEGER CudaTime;
//QueryPerformanceCounter(&CudaTime);
cuda_do_forward_projection(sm,opt,voxval,tsum,zdzmap);
//printf("Cuda time %.3f secs\n",timePassed(CudaTime));
}
else {
LARGE_INTEGER ForwardTime;
QueryPerformanceCounter(&ForwardTime);
// do_forward_projection(sm,opt,voxval,tsum,zdzmap);
host_do_forward_project(sm,voxval,zdzmap);
printf("Host Forward time %.3f secs\n",timePassed(ForwardTime));
}
LARGE_INTEGER IOTime;
QueryPerformanceCounter(&IOTime);
if(opt.isset("voxval")) write_raw<float>("voxval.raw",voxval,zstride*F120_NZbins);
if (opt.isset("tsum")) write_raw<float>("tsum.raw",tsum,stride*stride);
if (opt.isset("zdzmap") && !opt.isset("cuda")) {
write_raw<float>("host_small.raw",zdzmap,F120_DZstride*F120_DCstride);
swizzle_buffer(zdzmap,145,288,1176,288,1,288*145);
write_raw<float>("host_swizz.raw",zdzmap,F120_DZstride*F120_DCstride);
}
if (opt.isset("mapsum")){
for (int k = 1; k < stride; k++) for (int j=0;j<stride;j++) tsum[j] += tsum[stride*k+j];
write_raw<float>("mapsum.raw",tsum,stride);
}
//show_full_tsum(tsum);
if(!opt.isset("cuda")) printf("IO time %.3f secs\n",timePassed(IOTime));
if (zdzmap) free(zdzmap);
if (tsum) free(tsum);
if (voxval) free(voxval);
return 0;
}
int make_dc(int c1,int c2)
{
int dc = abs(c2-c1);
if (c1 > c2) dc = F120_NXY-dc; // fix logically negative dc values
//if (dc < F120_DCmin || dc > F120_DCmax) return -1; this check now done in cull program
return dc-F120_DCmin;
}
template <typename T> int swizzle_buffer(T *a,int nz,int ny,int nx,int mz,int my,int mx)
{
// reformat dim[n1,n2,n3] to [m1,m2,m3] ( a permutation of original)
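	// element [z][y][x] of the nz x ny x nx input lands at linear index z*mz + y*my + x*mx;
	// e.g. swizzle_buffer(zdzmap,145,288,1176, 288,1,288*145) turns a [145][288][1176] volume
	// into [1176][145][288] order, making the old x index the slowest-varying dimension.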
int size = nz*ny*nx;
T *b = (T *)malloc(size*sizeof(T));
if (!b) return 1;
for (int k=0; k<size; k++) b[k] = a[k];
for (int z=0; z<nz; z++) for (int y = 0; y<ny; y++) for (int x=0; x<nx; x++){
int k = (z*ny+y)*nx+x;
int j =z*mz+y*my+x*mx;
a[j] = b[k];
}
free(b);
return 0;
}
int cuda_do_forward_projection(SMfull &sm,AOptions &opt,float *voxval,float *tsum,float *zdzmap)
{
LARGE_INTEGER CudaTime;
QueryPerformanceCounter(&CudaTime);
cudaSM *dev_sm = NULL;
cudaSM host_sm;
VoxMap vm;
cudaError_t cudaStatus;
// sm for device
if(setup_cuda_sm(sm,host_sm,&dev_sm)) return 1;
if(setup_cuda_vmap(vm)) return 1;
// sector constants to global device memory
//int *map = vm.amap_x();
//cudaStatus = cudaMemcpyToSymbol(dev_map8_x,map,24*sizeof(int));
//if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_x failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
//map = vm.amap_y();
//cudaStatus = cudaMemcpyToSymbol(dev_map8_y,map,24*sizeof(int));
//if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_y failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
//map = vm.amap_c();
//cudaStatus = cudaMemcpyToSymbol(dev_map8_c,map,16*sizeof(int));
//if (cudaStatus != cudaSuccess) { printf("cudaMemcpyToSymbol map8_c failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
// big buffers for device
int zstride = F120_NXYbins*F120_NXYbins;
float *dev_voxval = NULL;
cudaStatus = cudaMalloc((void**)&dev_voxval,zstride*F120_NZbins*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc dev_voxval failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
cudaStatus = cudaMemcpy(dev_voxval,voxval,zstride*F120_NZbins*sizeof(float),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy to corners failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
float *dev_tsum = NULL;
int stride = F120_NXY*F120_TrueNZ;
cudaStatus = cudaMalloc((void**)&dev_tsum,stride*stride*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc dev_tsum failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
cudaStatus = cudaMemset(dev_tsum,0,stride*stride*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMemset to dev_tsum failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
float *dev_zdzmap = NULL;
cudaStatus = cudaMalloc((void**)&dev_zdzmap,F120_DZstride*F120_DCstride*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc dev_zdzmap failed [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
cudaStatus = cudaMemset(dev_zdzmap,0,F120_DZstride*F120_DCstride*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMemset to dev_zdzmap failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
cudaDeviceSynchronize();
printf("Cuda setup time %.3f secs\n",timePassed(CudaTime));
// efficiency sums (#voxels) init from external file
float *teffs = NULL;
float *dev_teffs = NULL;
if (make_buffers<float>(&teffs,&dev_teffs,zstride*F120_NZbins,"teffs")) return 1;
if (read_buffers<float>(tvfile,zstride*F120_NZbins,teffs,dev_teffs,1.0f)) return 1;
QueryPerformanceCounter(&CudaTime);
printf("here we cuda go...\n");
// first do forward projection
for (int kv=0; kv<sm.voxels; kv++){
//if (opt.isset("evenfaster")) {
// forward_project_even_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv);
//}
if (opt.isset("faster")) {
// forward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv,0);
// forward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv,1);
}
else {
// forward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,0);
// forward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"forward_project kernel error: [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
}
cudaDeviceSynchronize();
printf("Cuda forward kernel time %.3f secs\n",timePassed(CudaTime));
QueryPerformanceCounter(&CudaTime);
// clear device buffer first!!!
cudaStatus = cudaMemset(dev_voxval,0,zstride*F120_NZbins*sizeof(float));
if (cudaStatus != cudaSuccess) { printf("cudaMemset to dev_voxval failed [%s]",cudaGetErrorString(cudaStatus)); return 1; }
// then do backward
if (opt.isset("backproj")) for (int kv=0; kv<sm.voxels; kv++){
if (opt.isset("bfast")) {
// backward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,dev_teffs,kv);
//backward_project_faster<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
else{
//backward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,0);
//backward_project<<<64,64>>>(dev_sm,dev_voxval,dev_zdzmap,kv,1);
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) { fprintf(stderr,"back_project kernel error: [%s]\n",cudaGetErrorString(cudaStatus)); return 1; }
}
cudaDeviceSynchronize();
printf("Cuda backward kernel time %.3f secs\n",timePassed(CudaTime));
QueryPerformanceCounter(&CudaTime);
cudaStatus = cudaMemcpy(zdzmap,dev_zdzmap,F120_DZstride*F120_DCstride*sizeof(float),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy to zdzmap failed: %s\n",cudaGetErrorString(cudaStatus)); return cudaStatus; }
write_raw<float>("cuda_small.raw",zdzmap,F120_DZstride*F120_DCstride);
// zo yo xo z0->yn yo->xn xo->zn
swizzle_buffer(zdzmap,145,288,1176, 288, 1, 288*145);
write_raw<float>("cuda_swizz.raw",zdzmap,F120_DZstride*F120_DCstride);
if (opt.isset("backproj")){
cudaStatus = cudaMemcpy(voxval,dev_voxval,zstride*F120_NZbins*sizeof(float),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy to voxval failed: %s\n",cudaGetErrorString(cudaStatus)); return cudaStatus; }
write_raw<float>("cuda_backp.raw",voxval,zstride*F120_NZbins);
}
cudaDeviceSynchronize();
printf("Cuda IO end time time %.3f secs\n",timePassed(CudaTime));
if (dev_zdzmap) cudaFree(dev_zdzmap);
if (dev_tsum) cudaFree(dev_tsum);
if (dev_voxval) cudaFree(dev_voxval);
if (host_sm.lors) cudaFree(host_sm.lors);
if (host_sm.v) cudaFree(host_sm.v);
return 0;
}
// this version modeled on cuda kernel!
int host_do_forward_project(SMfull &sm,float *voxval,float *zdzmap)
{
hex p;
hex q;
VoxMap vm;
for (int kv =0; kv<sm.voxels; kv++){
if(kv==sm.voxels-1) printf("kv %d\n",kv);
else printf("kv %d\r",kv);
int nlors = sm.v[kv].lors;
p.x = sm.v[kv].nx;
p.y = sm.v[kv].ny;
//int vox_xypos = p.y*F120_NXYbins+p.x;
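		// kl encodes both the lor and its symmetry copy: idl = kl/16 picks the stored lor,
		// ids = kl%16 picks one of 8 rotational sectors (with ids>7 selecting the axial mirror),
		// and 'even' selects the odd/even axial interleave of the voxel grid.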
for (int kl=0; kl<16*nlors;kl++) for(int even=0;even<2;even++){
int ids = kl%16;
int idl = kl/16;
uint key = sm.key(kv,idl);
small_lor_from(key,p);
float val = sm.val(kv,idl,even);
if (p.x==p.y) val *= 0.5f; // fix for diagonal bug in sysmat - present in all sectors thus counted twice in code below.
if (val >0.0f){
int dz = p.z2-p.z1;
vm.hex_to_sector(ids%8,p,q);
if (ids>7) mirror(q,F120_TrueNZ-1+even);
int vox_xypos = q.y*F120_NXYbins+q.x;
//int zsm_offset = 0;
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum
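				// closed form of sum_{k=0}^{dz-1}(48-k) = 48*dz - dz*(dz-1)/2 = dz*(97-dz)/2,
				// i.e. the offset of the first z slot for this ring difference in the packed zdz layout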
for (int sz=0; sz<F120_TrueNZ-dz; sz++){ // zloop (odd)
int z1 = sz;
int vzq = F120_TrueNZ-1+even - 2*(q.z1-z1);
vzq = min(F120_NZbins-1,max(0,vzq));
float qval = voxval[vzq*F120_NXYstride+vox_xypos];
int dcq = make_dc(q.c1,q.c2);
zdzmap[(q.c1+dcq*F120_NXY)*F120_DZstride+(sz+zsm_offset)] += val*qval;
}
}
}
}
return 0;
}
int host_do_backward_projection(SMfull &sm,float *voxval,float *zdzmap)
{
//
// Evaluate the Numerator sum: Sum[v] = SM[t][v] T[t] summing over lors t for each voxel v
//
// use 32 threads per lor each handles primary or mirror and even and odd
//int id = threadIdx.x + blockIdx.x*blockDim.x;
//int idt = id/32; // lor to process same for 8 threads per item
	//int ids = id%32;   // which thread am i within the 32-thread set: 8 sectors x 2 for proper & mirror x 2 for even/odd
//int even = (id%32)/16; // 0 or 1
//int idstep = blockDim.x*gridDim.x/32;
hex p;
hex q;
VoxMap vm;
for (int kv =0; kv<sm.voxels; kv++){
int nlors = sm.v[kv].lors;
p.x = sm.v[kv].nx;
p.y = sm.v[kv].ny;
//if (ids==0 && kv == 0 )printf("idt %d nlors %d\n",idt,nlors);
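		// normalise each lor value by the voxel's geometric efficiency (geff) for its parity,
		// presumably the sensitivity term of the MLEM update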
float div[2];
div[0] = 1.0f/sm.v[kv].geff[0];
div[1] = 1.0f/sm.v[kv].geff[1];
//int vox_xypos = p.y*F120_NXYbins+p.x;
for (int kl=0; kl<16*nlors; kl++) for (int even=0; even<2; even++){
int ids = kl%16; // sector+ mirror sector
int idl = kl/16;
uint key = sm.key(kv,idl);
lor_from(key,p);
float val = sm.val(kv,idl,even);
if (p.x==p.y) val *= 0.5f;
//val *= 0.000001f;
val *= div[even];
if (val >0.0f){
int dz = p.z2-p.z1;
vm.hex_to_sector((ids/2)%8,p,q);
if (ids%2) mirror(q,F120_TrueNZ-1+even);
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum
int dcq = make_dc(q.c1,q.c2);
int dcq_offset = (q.c1+dcq*F120_NXY)*F120_DZstride;
//int dcq_offset = 10;
//if (id <32 && kv==75) printf("%3d (%3d %3d %3d) %5d %5d\n",ids,q.c1,q.c2,dcq,dcq_offset,zsm_offset);
int sz_max = F120_TrueNZ-dz;
for (int sz=0; sz<sz_max; sz++){ // zloop (odd)
int vzq = F120_TrueNZ-1+even - 2*(q.z1-sz);
vzq = min(F120_NZbins-1,max(0,vzq));
float tqval = zdzmap[dcq_offset+(sz+zsm_offset)];
voxval[vzq*F120_NXYstride+q.y*F120_NXYbins+q.x] += tqval*val;
}
}
}
}
return 0;
}
int map_chop(char *name_in, char *name_out)
{
uint *map = mymalloc<uint>(F120_STride, "swizz in"); // single slice of full dataset
uint *map_bit = map+F120_STride/4;
FILE *fin = fopen(name_in, "rb");
if (!fin) { printf("bad open for %s\n",name_in); return 1; }
FILE *fout = fopen(name_out, "wb");
if (!fout) { printf("bad open for %s\n",name_out); return 1; }
int slice_in = 0;
int slice_out = 0;
for (int z1 = 0; z1<F120_NZ-24; z1++) for (int c1=0;c1<F120_NXY;c1++){
if (fread(map,sizeof(uint),F120_STride,fin) != F120_STride) {printf("bad read for map slice %d\n",slice_in); return 1;}
slice_in++;
if (z1 >= 24) {
fwrite(map_bit,sizeof(uint),F120_STride/2,fout);
slice_out++;
}
}
fclose(fout);
fclose(fin);
printf("map %s chopped to %s s_in %d sout %d\n",name_in,name_out,slice_in,slice_out);
return 0;
}
int map_swizz(char *name_in, char *name_out)
{
uint *map = mymalloc<uint>(F120_STride/2, "swizz in"); // single slice of full dataset
float *zdzmap = mycalloc<float>(F120_DZstride*F120_DCstride,"swizz out ");
if (!zdzmap) return 1;
FILE *fin = fopen(name_in, "rb");
if (!fin) { printf("bad open for %s\n",name_in); return 1; }
int slice = 0;
for (int z1 = 0; z1 < F120_TrueNZ; z1++){
printf("z1 %2d slice %d\n",z1,slice);
for (int c1 = 0; c1 < F120_NXY; c1++){
if (fread(map,sizeof(uint),F120_STride/2,fin) != F120_STride/2) {printf("bad read for map slice %d\n",slice); return 1;}
slice ++;
for (int z2=0;z2<F120_TrueNZ;z2++) for (int c2=0;c2<F120_NXY;c2++){
float val = (float)map[z2*F120_NXY+c2];
if (val > 0.0f){
quad p = {z1,c1,z2,c2};
//p.z1 = z1;
//p.c1 = c1;
//p.z2 = z2;
//p.c2 = c2;
proper_lor(p);
int dz = p.z2-p.z1;
int zsm_offset = (dz*(97-dz))/2; // this is ( 48+47+..) dz terms in sum from cuda code!!
int dcp = make_dc(p.c1,p.c2);
//if(p.z1==5 && p.z2==15) printf("lor (%2d %3d)-(%2d %3d) val %8.1f dcp %3d dz %2d offset %4d\n",p.z1,p.c1,p.z2,p.c2,val,dcp,dz,zsm_offset);
//if(dcp >= 0 && dcp <F120_DCsize) zdzmap[(p.c1+dcp*F120_NXY)*F120_DZstride+(p.z1+zsm_offset)] += val;
if(dcp >= 0 && dcp <F120_DCsize) zdzmap[(p.z1+zsm_offset)*F120_DCstride +(p.c1+dcp*F120_NXY)] += val;
}
}
}
}
fclose(fin);
write_raw<float>(name_out,zdzmap,F120_DZstride*F120_DCstride);
free(zdzmap);
free(map);
return 0;
}
int simple_check(SMfull &sm,AOptions &opt,float *tsum,int nx,int ny,int sector)
{
// NB Z in [0,47] for crystals and [0,94] for voxels CARE
printf("simple check for %d %d sector %d\n",nx,ny,sector);
quad p;
quad q;
quad m; // mirror of p;
VoxMap vm;
//int bugs = 0;
float sm_val0 = 0.0f;
float sm_val1 = 0.0f;
int stride = F120_NXY*F120_TrueNZ;
int dz_cut = opt.set_from_opt("dzcut",1);
printf("dz_cut = %d\n",dz_cut);
//return 1;
for (int kv = 0; kv < sm.voxels; kv++){
//if (sm.v[kv].nx != nx || sm.v[kv].ny != ny) continue;
//printf("found voxel kv=%d lors = %d\n",kv,sm.v[kv].lors);
printf("kv %d\r",kv);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
uint key = sm.key(kv,kl);
lor_from(key,p);
//if (p.z1 != p.z2) continue; // debug!!!!
if (abs(p.z1-p.z2) > dz_cut) continue; // debug!!!!
sm_val0 = sm.val(kv,kl,0);
sm_val1 = sm.val(kv,kl,1);
int dz = p.z2-p.z1;
for (int s=0; s<8; s++){
if (sector>=0 && s != sector) continue; // sector = -1 does all
vm.quad_to_sector(s,p,q);
mirror(q,m,95);
// TODO recover Z vertex necessary for real FP!!!!! (=47/48-z1)
//if (sm_val0> 0.0f) for (int vz=1; vz<F120_NZbins; vz+=2){ //oddds TODO smart limits here
//
// int z1 = q.z1 + (vz - 95)/2;
// int z2 = q.z2 + (vz - 95)/2;
// int z3 = m.z1 + (vz - 95)/2;
// int z4 = m.z2 + (vz - 95)/2;
if (sm_val0> 0.0f) for (int sz=0; sz<F120_TrueNZ-dz; sz++){
int z1 = sz;
int z2 = sz+dz;
int z3 = sz;
int z4 = sz+dz;
//printf("lor %5d (%2d %3d)-(%2d %3d) -> %2d %2d val %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,z1,z2,sm_val);
//if (z1>=0 && z2<F120_TrueNZ){
tsum[(z1*F120_NXY+q.c1)*stride + z2*F120_NXY+q.c2] += sm_val0;
tsum[(z2*F120_NXY+q.c2)*stride + z1*F120_NXY+q.c1] += sm_val0;
//}
//if (z3>=0 && z4<F120_TrueNZ){
tsum[(z3*F120_NXY+m.c1)*stride + z4*F120_NXY+m.c2] += sm_val0;
tsum[(z4*F120_NXY+m.c2)*stride + z3*F120_NXY+m.c1] += sm_val0;
//}
}
// do evens?
mirror(q,m,96);
//if (sm_val1> 0.0f) for (int vz=0; vz<F120_NZbins; vz+=2){ //evens
// int z1 = q.z1 + (vz - 96)/2;
// int z2 = q.z2 + (vz - 96)/2;
// int z3 = m.z1 + (vz - 96)/2;
// int z4 = m.z2 + (vz - 96)/2;
if (sm_val1> 0.0f) for (int sz=0; sz<F120_TrueNZ-dz; sz++){ //evens
int z1 = sz;
int z2 = sz+dz;
int z3 = sz;
int z4 = sz+dz;
//printf("lor %5d (%2d %3d)-(%2d %3d) -> %2d %2d val %9.5f\n",kl,p.z1,p.c1,p.z2,p.c2,z1,z2,sm_val);
//if (z1>=0 && z2 < F120_TrueNZ){
tsum[(z1*F120_NXY+q.c1)*stride + z2*F120_NXY+q.c2] += sm_val1;
tsum[(z2*F120_NXY+q.c2)*stride + z1*F120_NXY+q.c1] += sm_val1;
//}
//if (z3>=0 && z4 < F120_TrueNZ){
tsum[(z3*F120_NXY+m.c1)*stride + z4*F120_NXY+m.c2] += sm_val1;
tsum[(z4*F120_NXY+m.c2)*stride + z3*F120_NXY+m.c1] += sm_val1;
//}
}
} // end s loop
} // end kl loop
} // end kv loop
return 0;
}
int show_full_tsum(float *tsum)
{
int stride = F120_NXY*F120_TrueNZ;
float *smap = mycalloc<float>(stride*stride,"tsum/smap");
if (!smap) return 1;
int zoffset = 0;
for (int z1=0; z1<F120_TrueNZ; z1++) {
for (int z2=z1; z2<F120_TrueNZ; z2++){
for (int c1=0; c1<F120_NXY; c1++) for (int dc=0; dc<F120_DCsize; dc++){
int c2 = c1+dc+F120_DCmin;
int dz = z2-z1;
smap[(z1*F120_NXY+c1)*stride+(z2*F120_NXY+c2)] =tsum[((zoffset+dz)*F120_NXY+c1)*F120_DCsize+dc];
}
}
zoffset += 48-z1;
}
write_raw<float>("tsum_full.raw",smap,stride*stride);
free(smap);
return 0;
}
int compute_forward(SMfull &sm, float *voxval, float *tsum)
{
// NB Z in [0,47] for crystals and [0,94] for voxels CARE
quad p;
quad m0; // odd mirror of p;
quad m1; // even mirror of p;
VoxMap vm;
int bugs = 0;
for (int kv = 0; kv < sm.voxels; kv++){
if (bugs>0)printf("kv = %d\r",kv);
int xv[8];
int yv[8];
int c1[8];
int c2[8];
// set voxel octet - good for all lors
for (int s=0; s<8; s++) vm.xy_to_sector(s,xv[s],yv[s],sm.v[kv].nx,sm.v[kv].ny);
// now loop over lors for this voxel octet
if (bugs>0 && sm.v[kv].nx==64 && sm.v[kv].ny==64){
printf("octet %d:",kv);
for (int s=0; s<8; s++) printf(" (%d %d)",xv[s],yv[s]);
printf("\n");
}
else printf("%d\r",kv);
for (int kl = 0; kl < sm.v[kv].lors; kl++){
//printf("kl=%d\n",kl);
uint key = sm.key(kv,kl);
lor_from(key,p);
float sm_val0 = sm.val(kv,kl,0);
float sm_val1 = sm.val(kv,kl,1);
int dz = p.z2-p.z1;
//if (p.c1 > p.c2) p.c2 += F120_NXY;
int dc = abs(p.c2-p.c1);
if (p.c1 > p.c2) dc = 288-dc; // fix logically negative dc values
if (dc < F120_DCmin || dc > F120_DCmax) continue; // check now done in cull program
dc -= F120_DCmin;
int m0check = mirror(p,m0,95); // posn in long detector needed here
int m1check = mirror(p,m1,96);
if (bugs>0){
printf("kv/l %d %d p: (%2d %3d)-(%2d %3d) m0: (%2d %3d)-(%2d %3d) m1: (%2d %3d)-(%2d %3d) vals %8.5f %8.5f\n",kv,kl,p.z1,p.c1,p.z2,p.c2,m0.z1,m0.c1,m0.z2,m0.c2,m1.z1,m1.c1,m1.z2,m1.c2,sm_val0,sm_val1);
bugs--;
}
int zoffset = 0;
for (int s=0; s<8; s++){
c1[s] = vm.c_to_sector(s,p.c1);
c2[s] = vm.c_to_sector(s,p.c2);
}
int stride = F120_NXY*F120_TrueNZ;
//swim each tube along z-axis of detector starting at z=0 and ending at 47-dz
for (int zt=0; zt<F120_TrueNZ-dz; zt++){
			int p0_zv = 95 - 2*(p.z1-zt);  // zv generated at zvbin on 47/48 crystal boundary (voxel z=95)
int p1_zv = 96 - 2*(p.z1-zt); // zv generated at zvbin centre of crystal 48 (voxel z=96)
			int m0_zv = 95 - 2*(m0.z1-zt);  // care bug fix 24/08/17 mirrors keep primary voxel
int m1_zv = 96 - 2*(m1.z1-zt);
if(bugs>0){
printf("zt=%2d raw p %2d %2d, p0 %2d p1 %2d m0 %2d m1 %2d offset %d\n",zt,p.z1,p.z2,p0_zv,p1_zv,m0_zv,m1_zv,zoffset);
bugs--;
}
//if(zv0 < 0 || zv1 < 0) printf("zt=%d z1 %d z2 %d, zv0 %d zv1 %d\n",zt,p.z1,p.z2,zv0,zv1);
for (int s=0; s<8; s++){
if (p0_zv>=0) {
tsum[(zt*F120_NXY+c1[s])*stride + (zt+dz)*F120_NXY+c2[s]] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
tsum[((zt+dz)*F120_NXY+c2[s])*stride + zt*F120_NXY+c1[s]] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
if (m0_zv>=0) {
tsum[(zt*F120_NXY+c1[s])*stride + (zt+dz)*F120_NXY+c2[s]] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
tsum[((zt+dz)*F120_NXY+c2[s])*stride + zt*F120_NXY+c1[s]] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
//if(p0_zv>=0) tsum[((zoffset+dz)*F120_NXY+p.c1 )*F120_DCsize+dc] += sm_val0*voxval[(p0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(p1_zv>=0) tsum[((zoffset+dz)*F120_NXY+p.c1 )*F120_DCsize+dc] += sm_val1*voxval[(p1_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(m0_zv>=0) tsum[((zoffset+dz)*F120_NXY+m0.c1)*F120_DCsize+dc] += sm_val0*voxval[(m0_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
//if(m1_zv>=0) tsum[((zoffset+dz)*F120_NXY+m1.c1)*F120_DCsize+dc] += sm_val0*voxval[(m1_zv*F120_NZbins+yv[s])*F120_NXYbins+xv[s]];
}
zoffset += (F120_TrueNZ-zt);
}
}
}
printf("\n");
return 0;
}
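// roibox_cut returns 1 when voxel (x,y) of the image grid (centre at 63.5) lies inside the
// reconstruction ROI, taken here as the disc of radius F120_Rmin/sqrt(2); the distance is
// measured to the voxel corner nearest the centre, so edge voxels are excluded conservatively.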
int roibox_cut(int x, int y)
{
double dx = ( abs(((double)x-63.5)) - 0.5 )*F120_XYBin;
double dy = ( abs(((double)y-63.5)) - 0.5 )*F120_XYBin; // corner closest to origin
double limit = (double)F120_Rmin/sqrt(2.0);
double dist = sqrt(dx*dx+dy*dy);
//if (dist <= limit) return 0;
return (dist <= limit) ? 1 : 0;
}
// allocates a zero-initialised host buffer and a matching cleared device buffer in one call
template <typename T> int make_buffers(T **buf_out,T **dev_buf_out, size_t len, char *tag)
{
T *buf = (T *)calloc(len,sizeof(T));
if (!buf) { printf("calloc error %s\n",tag); return 1; }
T *dev_buf = NULL;
cudaError_t cudaStatus = cudaMalloc((void**)&dev_buf,len*sizeof(T));
if (cudaStatus != cudaSuccess) { printf("cudaMalloc dev_%s failed [%s]\n",tag,cudaGetErrorString(cudaStatus)); return 1; }
cudaStatus = cudaMemset(dev_buf,0,len*sizeof(T));
if (cudaStatus != cudaSuccess) { printf("cudaMemset to dev_%s failed [%s]",tag,cudaGetErrorString(cudaStatus)); return 1; }
// hairy pointer syntax thanks to cuda
*buf_out = buf;
*dev_buf_out = dev_buf;
return 0;
}
template <typename T> int read_buffers(char *name, int len, T *h_buf, T *d_buf, T rescale)
{
if (read_raw<T>(name,h_buf,len)) return 1;
if (rescale != (T)1.0) for (int k=0; k<len; k++) h_buf[k] *= rescale;
cudaError_t cudaStatus = cudaMemcpy(d_buf,h_buf,len*sizeof(T),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy from file %s failed: [%s]",name,cudaGetErrorString(cudaStatus)); return 1; }
//printf("read_buffers for %s\n",name);
return 0;
}
template <typename T> int copy_buffer_to(int len, T *h_buf, T *d_buf)
{
cudaError_t cudaStatus = cudaMemcpy(d_buf,h_buf,len*sizeof(T),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy from host buffer failed: [%s]",cudaGetErrorString(cudaStatus)); return 1; }
return 0;
}
template <typename T> int copy_buffer_from(int len, T *h_buf, T *d_buf)
{
cudaError_t cudaStatus = cudaMemcpy(h_buf,d_buf,len*sizeof(T),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) { fprintf(stderr,"cudaMemcpy to host buffer failed: [%s]",cudaGetErrorString(cudaStatus)); return 1; }
return 0;
}
template <typename T> int clear_buffer(int len, T *d_buf)
{
cudaError_t cudaStatus = cudaMemset(d_buf,0,len*sizeof(T));
if (cudaStatus != cudaSuccess) { printf("cudaMemset to d_buf failed: [%s]",cudaGetErrorString(cudaStatus)); return 1; }
return 0;
}
int cyl_buffer_fill_normalized(float *vox,double val)
{
int stride = F120_NXYbins*F120_NXYbins;
int nvox = stride*F120_NZbins;
for (int k=0; k<nvox;k++) vox[k] = 0;
int count = 0;
for (int ky=0; ky<F120_NXYbins; ky++) for (int kx=0; kx<F120_NXYbins; kx++) if (roibox_cut(kx, ky)){
for(int kz=0;kz<F120_NZbins;kz++) vox[kz*stride+(F120_NXYbins*ky+kx)] = (float)val;
count += F120_NZbins;
}
float nval = (float)val / (float)count;
for (int k = 0; k<nvox; k++) vox[k] /= (float)(count);
printf("buffer set to %.5e in ROI of %d voxels\n",nval,count);
write_raw<float>("roi_start.raw",vox,nvox);
return 0;
}
// returns the time in seconds elapsed between initialisation of the argument (QueryPerformanceCounter) and this call
double timePassed(LARGE_INTEGER &StartingTime)
{
LARGE_INTEGER EndingTime;
QueryPerformanceCounter(&EndingTime);
LARGE_INTEGER Frequency;
LARGE_INTEGER ElapsedMicroseconds;
QueryPerformanceFrequency(&Frequency);
ElapsedMicroseconds.QuadPart = EndingTime.QuadPart - StartingTime.QuadPart;
ElapsedMicroseconds.QuadPart *= 1000000;
ElapsedMicroseconds.QuadPart /= Frequency.QuadPart;
double timesec = 0.000001*ElapsedMicroseconds.QuadPart;
return timesec;
} |
a93217baa87e981e33cddaa0aafe337931199bfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by nicco on 09/03/20.
//
#include "iteration.cuh"
std::vector<float *> allocate_queries(int LEN_PATTERN_SEQ, int NUM_QUERIES, std::string type, int verbose){
/**
     * uniform random generator for queries. It's used to emulate an online matching query.
* **/
float *queries;
    float *queries_ptr = nullptr;   // stays null in the constant-memory path, so the caller's hipFree is a safe no-op
srand (static_cast <unsigned> (time(0)));
queries = (float *) malloc(NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float));
for (int q=0; q<NUM_QUERIES; q++) {
for (int l=0; l<LEN_PATTERN_SEQ; l++) {
queries[l + q*LEN_PATTERN_SEQ] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
if (verbose > 0) {
for (int q = 0; q < NUM_QUERIES * LEN_PATTERN_SEQ; q++) {
if (q % LEN_PATTERN_SEQ == 0) std::cout << "query " << q / LEN_PATTERN_SEQ << ": [";
std::cout << " " << queries[q] << " ";
if (q % LEN_PATTERN_SEQ == (LEN_PATTERN_SEQ - 1)) std::cout << "]" << std::endl;
}
}
if (type == "c") {
hipMemcpyToSymbol(queries_const, queries, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float));
}
else {
CUDA_CHECK_RETURN(hipMalloc((void **) &queries_ptr, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float)))
hipMemcpy(queries_ptr, queries, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float), hipMemcpyHostToDevice);
}
std::vector<float *> ptrs = {queries, queries_ptr};
return ptrs;
}
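// Note: when type == "c" the queries are copied into the __constant__ array queries_const
// (assumed to be declared in iteration.cuh) and queries_ptr stays null, so the kernels read them
// from constant memory; every other mode uploads a plain global-memory copy instead. The host
// pointer is also returned so the caller can free it after the launch.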
std::string one_iteration(int LEN_SEQ, int LEN_PATTERN_SEQ, int NUM_QUERIES, int RUNS, const std::string& type,
std::string mode, int verbose, float *statistic, int it) {
/**
     * one iteration of the main loop. It takes some hyper-parameters and performs several (RUNS) runs to compute the mean
     * and std of the selected (type) modalities. Those values are stored in statistic for writing to the csv file (see main).
* **/
std::cout << "\nThe new value of LEN_SEQ is " << LEN_SEQ << std::endl;
// compute hyper parameters after initialization
int LEN_RESULT = LEN_SEQ - LEN_PATTERN_SEQ + 1;
// check if the hyper pars are correct
    int gridX = (int)ceil((float)LEN_SEQ / THREADS_PER_BLOCK);   // cast first: plain integer division would truncate before ceil()
if (gridX < 1) {
std::cout << "len seq is smaller than the THREADS_PER_BLOCK value!! Try again! " << std::endl;
exit(1);
}
dim3 dimGrid(gridX, 1, 1);
dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
// define ptrs to data
float *data;
float *data_ptr;
// allocate data on host and device
data = (float *) malloc(LEN_SEQ * sizeof(float));
CUDA_CHECK_RETURN(hipMalloc((void **) &data_ptr, LEN_SEQ * sizeof(float)))
// generate data and queries on device
hiprandGenerator_t generator;
hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(generator, 1234ULL);
hiprandGenerateUniform(generator, data_ptr, LEN_SEQ);
    // the generator creates data on the device; if verbose > 1 the data is copied back to the host for visualization
if (verbose > 1) {
hipMemcpy(data, data_ptr, LEN_SEQ * sizeof(float), hipMemcpyDeviceToHost);
std::cout << "data : [";
for (int i = 0; i < LEN_SEQ; i++) {
std::cout << " " << data[i] << " ";
}
std::cout << "]" << std::endl;
}
// store the result
float *minSad, *dev_minSad;
int *minSadId, *dev_minSadId;
minSad = (float *) malloc(NUM_QUERIES * sizeof(float));
minSadId = (int *) malloc(NUM_QUERIES * sizeof(int));
CUDA_CHECK_RETURN(hipMalloc((void **) &dev_minSad, NUM_QUERIES * sizeof(float)))
CUDA_CHECK_RETURN(hipMalloc((void **) &dev_minSadId, NUM_QUERIES * sizeof(int)))
// vector for storing computational time
std::vector<float> t_n;
std::vector<float> t_p;
std::vector<float> t_t;
std::vector<float> t_c;
float total_computational_time = 0.0;
/***** Computing SAD on GPU *****/
for (int run = 0; run < RUNS; run++) {
if (run%(RUNS/2) == 0) std::cout << "STARTING RUN " << run << std::endl;
// define data to store statistic of each run
float *result, *result_ptr;
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(hipMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
if (type == "n" or type == "a") {
mode = "naive";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
hipLaunchKernelGGL(( computeSAD_naive), dim3(dimGrid), dim3(dimBlock), 0, 0, data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
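            // note: the kernel launch is asynchronous, so without a device synchronize before 'end'
            // this interval mostly covers the query allocation/upload and the launch itself; the
            // kernel actually completes during the blocking hipMemcpy calls below (the same applies
            // to the private/tiling/constant branches further down).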
hipMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), hipMemcpyDeviceToHost);
if (verbose > 1) {
hipMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), hipMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_n.push_back(total_computational_time);
free(queries);
hipFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(hipMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "p" or type == "a") {
mode = "private";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
hipLaunchKernelGGL(( computeSAD_priv), dim3(dimGrid), dim3(dimBlock), 0, 0, data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
hipMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), hipMemcpyDeviceToHost);
if (verbose > 1) {
hipMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), hipMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_p.push_back(total_computational_time);
free(queries);
hipFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(hipMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "t" or type == "a") {
mode = "tiling";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
hipLaunchKernelGGL(( computeSAD_tiling), dim3(dimGrid), dim3(dimBlock), 0, 0, data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
hipMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), hipMemcpyDeviceToHost);
if (verbose > 1) {
hipMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), hipMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_t.push_back(total_computational_time);
free(queries);
hipFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(hipMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "c" or type == "a") {
mode = "constant";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
hipLaunchKernelGGL(( computeSAD_constant), dim3(dimGrid), dim3(dimBlock), 0, 0, data_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
hipMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), hipMemcpyDeviceToHost);
if (verbose > 1) {
hipMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), hipMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_c.push_back(total_computational_time);
free(queries);
hipFree(queries_ptr);
}
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
}
std::cout << std::endl;
// mean and std for each time vector
if (type == "n" or type == "a") {
double t_m_n = compute_mean(t_n);
double std_n = compute_std(t_n);
std::cout << "In " << RUNS << " runs the NAIVE mode reports " << t_m_n
<< " microsec of mean with " << std_n << " of std" << std::endl;
statistic[it] = t_m_n;
statistic[it + 1] = std_n;
}
if (type == "p" or type == "a") {
double t_m_p = compute_mean(t_p);
double std_p = compute_std(t_p);
std::cout << "In " << RUNS << " runs the PRIVATE mode reports " << t_m_p
<< " microsec of mean with " << std_p << " of std" << std::endl;
statistic[it] = t_m_p;
statistic[it + 1] = std_p;
}
if (type == "t" or type == "a") {
double t_m_t = compute_mean(t_t);
double std_t = compute_std(t_t);
std::cout << "In " << RUNS << " runs the TILING mode reports " << t_m_t
<< " microsec of mean with " << std_t << " of std" << std::endl;
statistic[it] = t_m_t;
statistic[it + 1] = std_t;
}
if (type == "c" or type == "a") {
double t_m_c = compute_mean(t_c);
double std_c = compute_std(t_c);
std::cout << "In " << RUNS << " runs the CONSTANT mode reports " << t_m_c
<< " microsec of mean with " << std_c << " of std" << std::endl;
statistic[it] = t_m_c;
statistic[it + 1] = std_c;
}
    // FIXME change the stored variable here if you change which hyper-parameter is varied in the test!!!
statistic[it + 2] = LEN_SEQ;
// free host and device data
hiprandDestroyGenerator(generator);
free(data);
hipFree(data_ptr);
free(minSad);
free(minSadId);
hipFree(dev_minSad);
hipFree(dev_minSadId);
hipDeviceReset();
// return mode that is the correct string for csv name
return mode;
} | a93217baa87e981e33cddaa0aafe337931199bfa.cu | //
// Created by nicco on 09/03/20.
//
#include "iteration.cuh"
std::vector<float *> allocate_queries(int LEN_PATTERN_SEQ, int NUM_QUERIES, std::string type, int verbose){
/**
     * uniform random generator for queries. It's used to emulate an online matching query.
* **/
float *queries;
    float *queries_ptr = nullptr;   // stays null in the constant-memory path, so the caller's cudaFree is a safe no-op
srand (static_cast <unsigned> (time(0)));
queries = (float *) malloc(NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float));
for (int q=0; q<NUM_QUERIES; q++) {
for (int l=0; l<LEN_PATTERN_SEQ; l++) {
queries[l + q*LEN_PATTERN_SEQ] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
}
if (verbose > 0) {
for (int q = 0; q < NUM_QUERIES * LEN_PATTERN_SEQ; q++) {
if (q % LEN_PATTERN_SEQ == 0) std::cout << "query " << q / LEN_PATTERN_SEQ << ": [";
std::cout << " " << queries[q] << " ";
if (q % LEN_PATTERN_SEQ == (LEN_PATTERN_SEQ - 1)) std::cout << "]" << std::endl;
}
}
if (type == "c") {
cudaMemcpyToSymbol(queries_const, queries, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float));
}
else {
CUDA_CHECK_RETURN(cudaMalloc((void **) &queries_ptr, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float)))
cudaMemcpy(queries_ptr, queries, NUM_QUERIES * LEN_PATTERN_SEQ * sizeof(float), cudaMemcpyHostToDevice);
}
std::vector<float *> ptrs = {queries, queries_ptr};
return ptrs;
}
std::string one_iteration(int LEN_SEQ, int LEN_PATTERN_SEQ, int NUM_QUERIES, int RUNS, const std::string& type,
std::string mode, int verbose, float *statistic, int it) {
/**
     * one iteration of the main loop. It takes some hyper-parameters and performs several (RUNS) runs to compute the mean
     * and std of the selected (type) modalities. Those values are stored in statistic for writing to the csv file (see main).
* **/
std::cout << "\nThe new value of LEN_SEQ is " << LEN_SEQ << std::endl;
// compute hyper parameters after initialization
int LEN_RESULT = LEN_SEQ - LEN_PATTERN_SEQ + 1;
// check if the hyper pars are correct
    int gridX = (int)ceil((float)LEN_SEQ / THREADS_PER_BLOCK);   // cast first: plain integer division would truncate before ceil()
if (gridX < 1) {
std::cout << "len seq is smaller than the THREADS_PER_BLOCK value!! Try again! " << std::endl;
exit(1);
}
dim3 dimGrid(gridX, 1, 1);
dim3 dimBlock(THREADS_PER_BLOCK, 1, 1);
// define ptrs to data
float *data;
float *data_ptr;
// allocate data on host and device
data = (float *) malloc(LEN_SEQ * sizeof(float));
CUDA_CHECK_RETURN(cudaMalloc((void **) &data_ptr, LEN_SEQ * sizeof(float)))
// generate data and queries on device
curandGenerator_t generator;
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(generator, 1234ULL);
curandGenerateUniform(generator, data_ptr, LEN_SEQ);
    // the generator creates data on the device; if verbose > 1 the data is copied back to the host for visualization
if (verbose > 1) {
cudaMemcpy(data, data_ptr, LEN_SEQ * sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "data : [";
for (int i = 0; i < LEN_SEQ; i++) {
std::cout << " " << data[i] << " ";
}
std::cout << "]" << std::endl;
}
// store the result
float *minSad, *dev_minSad;
int *minSadId, *dev_minSadId;
minSad = (float *) malloc(NUM_QUERIES * sizeof(float));
minSadId = (int *) malloc(NUM_QUERIES * sizeof(int));
CUDA_CHECK_RETURN(cudaMalloc((void **) &dev_minSad, NUM_QUERIES * sizeof(float)))
CUDA_CHECK_RETURN(cudaMalloc((void **) &dev_minSadId, NUM_QUERIES * sizeof(int)))
// vector for storing computational time
std::vector<float> t_n;
std::vector<float> t_p;
std::vector<float> t_t;
std::vector<float> t_c;
float total_computational_time = 0.0;
/***** Computing SAD on GPU *****/
for (int run = 0; run < RUNS; run++) {
if (run%(RUNS/2) == 0) std::cout << "STARTING RUN " << run << std::endl;
// define data to store statistic of each run
float *result, *result_ptr;
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(cudaMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
if (type == "n" or type == "a") {
mode = "naive";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
computeSAD_naive<<<dimGrid, dimBlock>>>(data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
cudaMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), cudaMemcpyDeviceToHost);
if (verbose > 1) {
cudaMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), cudaMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_n.push_back(total_computational_time);
free(queries);
cudaFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(cudaMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "p" or type == "a") {
mode = "private";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
computeSAD_priv<<<dimGrid, dimBlock>>>(data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
cudaMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), cudaMemcpyDeviceToHost);
if (verbose > 1) {
cudaMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), cudaMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_p.push_back(total_computational_time);
free(queries);
cudaFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(cudaMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "t" or type == "a") {
mode = "tiling";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
computeSAD_tiling<<<dimGrid, dimBlock>>>(data_ptr, queries_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
cudaMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), cudaMemcpyDeviceToHost);
if (verbose > 1) {
cudaMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), cudaMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_t.push_back(total_computational_time);
free(queries);
cudaFree(queries_ptr);
}
if (type == "a") {
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
result = (float *) malloc(NUM_QUERIES * LEN_RESULT * sizeof(float));
CUDA_CHECK_RETURN(cudaMalloc((void **) &result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float)))
}
if (type == "c" or type == "a") {
mode = "constant";
total_computational_time = 0.0;
auto start = std::chrono::high_resolution_clock::now();
std::vector<float *> ptrs = allocate_queries(LEN_PATTERN_SEQ, NUM_QUERIES, type, verbose);
float *queries = ptrs[0];
float *queries_ptr = ptrs[1];
computeSAD_constant<<<dimGrid, dimBlock>>>(data_ptr, result_ptr, LEN_RESULT, LEN_PATTERN_SEQ,
NUM_QUERIES, dev_minSad, dev_minSadId);
auto end = std::chrono::high_resolution_clock::now();
total_computational_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
cudaMemcpy(minSad, dev_minSad, NUM_QUERIES * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(minSadId, dev_minSadId, NUM_QUERIES * sizeof(int), cudaMemcpyDeviceToHost);
if (verbose > 1) {
cudaMemcpy(result, result_ptr, NUM_QUERIES * LEN_RESULT * sizeof(float), cudaMemcpyDeviceToHost);
for (int r = 0; r < NUM_QUERIES * LEN_RESULT; r++) {
if (r % LEN_RESULT == 0) std::cout << "\nresult " << r / LEN_RESULT << ": [";
std::cout << " " << result[r] << " ";
if (r % LEN_RESULT == (LEN_RESULT - 1)) std::cout << "]" << std::endl;
}
}
if (verbose >= 1) {
std::cout << "\nMode " << mode << " in total computational time: " << total_computational_time
<< " microsec" << std::endl;
for (int s = 0; s < NUM_QUERIES; s++) {
std::cout << "Query " << s << " : min Sad = " << minSad[s] << " in Result ID = " << minSadId[s]
<< std::endl;
}
}
t_c.push_back(total_computational_time);
free(queries);
cudaFree(queries_ptr);
}
reset_result(result, result_ptr, LEN_RESULT, NUM_QUERIES);
}
std::cout << std::endl;
// mean and std for each time vector
if (type == "n" or type == "a") {
double t_m_n = compute_mean(t_n);
double std_n = compute_std(t_n);
std::cout << "In " << RUNS << " runs the NAIVE mode reports " << t_m_n
<< " microsec of mean with " << std_n << " of std" << std::endl;
statistic[it] = t_m_n;
statistic[it + 1] = std_n;
}
if (type == "p" or type == "a") {
double t_m_p = compute_mean(t_p);
double std_p = compute_std(t_p);
std::cout << "In " << RUNS << " runs the PRIVATE mode reports " << t_m_p
<< " microsec of mean with " << std_p << " of std" << std::endl;
statistic[it] = t_m_p;
statistic[it + 1] = std_p;
}
if (type == "t" or type == "a") {
double t_m_t = compute_mean(t_t);
double std_t = compute_std(t_t);
std::cout << "In " << RUNS << " runs the TILING mode reports " << t_m_t
<< " microsec of mean with " << std_t << " of std" << std::endl;
statistic[it] = t_m_t;
statistic[it + 1] = std_t;
}
if (type == "c" or type == "a") {
double t_m_c = compute_mean(t_c);
double std_c = compute_std(t_c);
std::cout << "In " << RUNS << " runs the CONSTANT mode reports " << t_m_c
<< " microsec of mean with " << std_c << " of std" << std::endl;
statistic[it] = t_m_c;
statistic[it + 1] = std_c;
}
    // FIXME change the stored variable here if you change which hyper-parameter is varied in the test!!!
statistic[it + 2] = LEN_SEQ;
// free host and device data
curandDestroyGenerator(generator);
free(data);
cudaFree(data_ptr);
free(minSad);
free(minSadId);
cudaFree(dev_minSad);
cudaFree(dev_minSadId);
cudaDeviceReset();
// return mode that is the correct string for csv name
return mode;
} |
6e2f8b1e08825c102b1daa79ec3fc254a2a7d260.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <iomanip>
#include <fstream>
#include <vector>
#include <string>
#include "tables.hpp"
#include "Timer.cuh"
#include "CheckError.cuh"
using namespace timer;
const int DIM = 128;
__global__
void crc32kernel(uint8_t* data, int length, uint32_t* d_table, uint32_t* tmp) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t crc = 0xffffffff;
uint32_t* current = (uint32_t*)data;
if(id < 4) {
uint32_t one = *current ^ crc;
int i = (256*((length-1)-id))+((one>>(8*id)) & 0xff);
tmp[id] = d_table[i];
}
else if(id > 3 && id < length) {
uint32_t two = *(current+(id / 4));
int i = (256*((length-1)-id))+(two>>(8*(id % 4)) & 0xff);
tmp[id] = d_table[i];
}
}
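// crc32kernel appears to implement a table-sliced CRC: thread 'id' handles message byte 'id',
// extracts it from the 32-bit word it belongs to, and looks it up in the 256-entry table slice
// for position (length-1-id); the first four threads also fold in the initial 0xffffffff.
// The per-byte partial remainders written to tmp are XOR-reduced on the host afterwards, which
// only yields the full CRC because each slice already accounts for that byte's distance from
// the end of the message.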
int main() {
Timer<DEVICE> TM_device;
Timer<HOST> TM_host;
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
uint32_t hcrc = 0xffffffff;
std::ifstream fin("../input3.txt");
std::string temp;
std::string d("");
if(fin.is_open()){
while(getline(fin, temp)){
d.append(temp);
}
}
fin.close();
std::cout << d.length() << std::endl;
uint8_t data[d.length()];
for(int i=0; i<d.length(); i++){
data[i] = d[i];
}
int length = sizeof(data);
auto *table = (uint32_t *)malloc(length * 256 * sizeof(uint32_t));
make_crc_table_reverse(table, length);
// -------------------------------------------------------------------------
// HOST INITIALIZATION
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
// -------------------------------------------------------------------------
// HOST EXECUTION
TM_host.start();
for(int i = 0; i < length; i++) {
hcrc = table[(hcrc ^ data[i]) & 0xFF] ^ (hcrc>>8);
}
hcrc ^= 0xffffffff;
TM_host.stop();
TM_host.print("CRC32C host: ");
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
//TM_device.start();
    uint32_t* dcrc = nullptr;  // never allocated; initialised so the hipFree below is a harmless no-op
uint8_t* ddata;
uint32_t* d_table;
uint32_t *d_tmp;
hipMalloc(&ddata, length*sizeof(uint8_t));
hipMalloc(&d_table, length * 256 * sizeof(uint32_t));
hipMalloc(&d_tmp, length * sizeof(uint32_t));
// -------------------------------------------------------------------------
// COPY DATA FROM HOST TO DEVICE
hipMemcpy(ddata, data, length*sizeof(uint8_t), hipMemcpyHostToDevice);
hipMemcpy(d_table, table, length * 256 * sizeof(uint32_t), hipMemcpyHostToDevice);
// -------------------------------------------------------------------------
// DEVICE EXECUTION
TM_device.start();
dim3 block_size(DIM, 1, 1);
dim3 num_blocks(ceil((float)length/DIM), 1, 1);
hipLaunchKernelGGL(( crc32kernel), dim3(num_blocks), dim3(block_size) , 0, 0, ddata, length, d_table, d_tmp);
// -------------------------------------------------------------------------
// COPY DATA FROM DEVICE TO HOST
uint32_t* h_crc_tmp = (uint32_t *)malloc(length * sizeof(uint32_t));
hipMemcpy(h_crc_tmp, d_tmp, length*sizeof(uint32_t), hipMemcpyDeviceToHost);
for(int i = 1; i < length; i++) {
h_crc_tmp[0] ^= h_crc_tmp[i];
}
h_crc_tmp[0] ^= 0xffffffff;
TM_device.stop();
hipDeviceSynchronize();
CHECK_CUDA_ERROR
TM_device.print("CRC32 device: ");
std::cout << std::setprecision(1)
<< "Speedup: " << TM_host.duration() / TM_device.duration()
<< "x\n\n";
// -------------------------------------------------------------------------
// RESULT CHECK
printf("0x%x - 0x%x\n", hcrc, h_crc_tmp[0]);
if (hcrc != h_crc_tmp[0]) {
std::cout << "CRC value mismatch\n\n";
//hipDeviceReset();
std::exit(EXIT_FAILURE);
}
std::cout << "<> Correct\n\n";
// -------------------------------------------------------------------------
// HOST MEMORY DEALLOCATION
//delete[] hdata;
free(table);
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
hipFree(d_table);
hipFree(ddata);
hipFree(dcrc);
// -------------------------------------------------------------------------
//hipDeviceReset();
}
| 6e2f8b1e08825c102b1daa79ec3fc254a2a7d260.cu | #include <iostream>
#include <chrono>
#include <iomanip>
#include <fstream>
#include <vector>
#include <string>
#include "tables.hpp"
#include "Timer.cuh"
#include "CheckError.cuh"
using namespace timer;
const int DIM = 128;
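// Parallel table-driven CRC-32: thread `id` handles byte `id` of the message and looks
// up the 256-entry sub-table for position (length - 1 - id) in d_table (length * 256
// entries, built on the host by make_crc_table_reverse). The first four bytes are XORed
// with the 0xffffffff seed before the lookup. Because CRC is linear over GF(2), the
// per-byte partial remainders written to tmp[] can simply be XOR-folded together
// afterwards (done on the host in main).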
__global__
void crc32kernel(uint8_t* data, int length, uint32_t* d_table, uint32_t* tmp) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t crc = 0xffffffff;
uint32_t* current = (uint32_t*)data;
if(id < 4) {
uint32_t one = *current ^ crc;
int i = (256*((length-1)-id))+((one>>(8*id)) & 0xff);
tmp[id] = d_table[i];
}
else if(id > 3 && id < length) {
uint32_t two = *(current+(id / 4));
int i = (256*((length-1)-id))+(two>>(8*(id % 4)) & 0xff);
tmp[id] = d_table[i];
}
}
int main() {
Timer<DEVICE> TM_device;
Timer<HOST> TM_host;
// -------------------------------------------------------------------------
// HOST MEMORY ALLOCATION
uint32_t hcrc = 0xffffffff;
std::ifstream fin("../input3.txt");
std::string temp;
std::string d("");
if(fin.is_open()){
while(getline(fin, temp)){
d.append(temp);
}
}
fin.close();
std::cout << d.length() << std::endl;
uint8_t data[d.length()];
for(int i=0; i<d.length(); i++){
data[i] = d[i];
}
int length = sizeof(data);
auto *table = (uint32_t *)malloc(length * 256 * sizeof(uint32_t));
make_crc_table_reverse(table, length);
// -------------------------------------------------------------------------
// HOST INITIALIZATION
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
// -------------------------------------------------------------------------
// HOST EXECUTION
TM_host.start();
for(int i = 0; i < length; i++) {
hcrc = table[(hcrc ^ data[i]) & 0xFF] ^ (hcrc>>8);
}
hcrc ^= 0xffffffff;
TM_host.stop();
TM_host.print("CRC32C host: ");
// -------------------------------------------------------------------------
// DEVICE MEMORY ALLOCATION
//TM_device.start();
    uint32_t* dcrc = nullptr;  // never allocated; initialised so the cudaFree below is a harmless no-op
uint8_t* ddata;
uint32_t* d_table;
uint32_t *d_tmp;
cudaMalloc(&ddata, length*sizeof(uint8_t));
cudaMalloc(&d_table, length * 256 * sizeof(uint32_t));
cudaMalloc(&d_tmp, length * sizeof(uint32_t));
// -------------------------------------------------------------------------
// COPY DATA FROM HOST TO DEVICE
cudaMemcpy(ddata, data, length*sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_table, table, length * 256 * sizeof(uint32_t), cudaMemcpyHostToDevice);
// -------------------------------------------------------------------------
// DEVICE EXECUTION
TM_device.start();
dim3 block_size(DIM, 1, 1);
dim3 num_blocks(ceil((float)length/DIM), 1, 1);
crc32kernel<<< num_blocks, block_size >>>(ddata, length, d_table, d_tmp);
// -------------------------------------------------------------------------
// COPY DATA FROM DEVICE TO HOST
uint32_t* h_crc_tmp = (uint32_t *)malloc(length * sizeof(uint32_t));
cudaMemcpy(h_crc_tmp, d_tmp, length*sizeof(uint32_t), cudaMemcpyDeviceToHost);
for(int i = 1; i < length; i++) {
h_crc_tmp[0] ^= h_crc_tmp[i];
}
h_crc_tmp[0] ^= 0xffffffff;
TM_device.stop();
cudaDeviceSynchronize();
CHECK_CUDA_ERROR
TM_device.print("CRC32 device: ");
std::cout << std::setprecision(1)
<< "Speedup: " << TM_host.duration() / TM_device.duration()
<< "x\n\n";
// -------------------------------------------------------------------------
// RESULT CHECK
printf("0x%x - 0x%x\n", hcrc, h_crc_tmp[0]);
if (hcrc != h_crc_tmp[0]) {
std::cout << "CRC value mismatch\n\n";
//cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
std::cout << "<> Correct\n\n";
// -------------------------------------------------------------------------
// HOST MEMORY DEALLOCATION
//delete[] hdata;
free(table);
// -------------------------------------------------------------------------
// DEVICE MEMORY DEALLOCATION
cudaFree(d_table);
cudaFree(ddata);
cudaFree(dcrc);
// -------------------------------------------------------------------------
//cudaDeviceReset();
}
|
f5c2a2cef23eda07df1a0806dd86aafe0063c76d.hip | // !!! This is a file automatically generated by hipify!!!
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: [email protected]) 2018-09-12
*/
#include "Dropout.h"
#include "Dropout.cuh"
#include "Loss.cuh"
#include "../XDevice.h"
#ifdef USE_ROCM
// the CUDA stuff
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#endif
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
dropout function (Cuda kernel)
>> x - input data pointer
>> y - output data pointer
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
__global__
void KernelDropoutCompute(DTYPE * x, DTYPE * y, DTYPE * m, DTYPE s, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
y[i] = x[i] * m[i] * s;
}
}
/*
dropout function (Cuda version)
>> x - input tensor
>> y - output tensor
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
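/* note: the mask m holds 0/1 keep indicators and scaleFactor rescales the surviving
   activations; with inverted dropout this is commonly 1 / (1 - p), but the exact value
   is chosen by the caller. */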
void _CudaDropout(const XTensor * x, XTensor * y, const XTensor * mask, DTYPE scaleFactor)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
        CheckNTErrors(!x->isSparse && !y->isSparse, "the dropout function does not support sparse matrices.");
CheckNTErrors(x->unitNum && y->unitNum, "we require two vectors with the same length.");
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
hipLaunchKernelGGL(( KernelDropoutCompute), dim3(dim3(gridSize[0])), dim3(dim3(blockSize[0])), 0, 0, (DTYPE*)x->data, (DTYPE*)y->data, (DTYPE*)mask->data, scaleFactor, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
/*
backward computation of dropout function (Cuda kernel)
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
__global__
void KernelDropoutBackward(DTYPE * dedy, DTYPE * dedx,
DTYPE * m, DTYPE s, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
dedx[i] = dedy[i] * m[i] * s;
}
}
/*
backward computation of dropout function (Cuda version)
dE/dx = dE/dy * dy/dx
>> y - output of the dropout function
>> x - input of the dropout function
>> dedy - dE/dy
>> dedx - dE/dx
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
void _CudaDropoutBackward(const XTensor * y, const XTensor * x,
const XTensor * dedy, XTensor * dedx,
const XTensor * mask, DTYPE scaleFactor)
{
int gridSize[3], blockSize[3];
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
/* dE/ds = dE/dy * dy/ds */
hipLaunchKernelGGL(( KernelDropoutBackward), dim3(dim3(gridSize[0])),dim3(dim3(blockSize[0])), 0, 0,
(DTYPE*)dedy->data, (DTYPE*)dedx->data,
(DTYPE*)mask->data, scaleFactor, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
#endif
} // namespace nts(NiuTrans.Tensor) | f5c2a2cef23eda07df1a0806dd86aafe0063c76d.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: [email protected]) 2018-09-12
*/
#include "Dropout.h"
#include "Dropout.cuh"
#include "Loss.cuh"
#include "../XDevice.h"
#ifdef USE_CUDA
// the CUDA stuff
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda.h>
#endif
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
dropout function (Cuda kernel)
>> x - input data pointer
>> y - output data pointer
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
__global__
void KernelDropoutCompute(DTYPE * x, DTYPE * y, DTYPE * m, DTYPE s, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
y[i] = x[i] * m[i] * s;
}
}
/*
dropout function (Cuda version)
>> x - input tensor
>> y - output tensor
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
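/* note: the mask m holds 0/1 keep indicators and scaleFactor rescales the surviving
   activations; with inverted dropout this is commonly 1 / (1 - p), but the exact value
   is chosen by the caller. */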
void _CudaDropout(const XTensor * x, XTensor * y, const XTensor * mask, DTYPE scaleFactor)
{
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
        CheckNTErrors(!x->isSparse && !y->isSparse, "the dropout function does not support sparse matrices.");
CheckNTErrors(x->unitNum && y->unitNum, "we require two vectors with the same length.");
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
KernelDropoutCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((DTYPE*)x->data, (DTYPE*)y->data, (DTYPE*)mask->data, scaleFactor, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
/*
backward computation of dropout function (Cuda kernel)
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
__global__
void KernelDropoutBackward(DTYPE * dedy, DTYPE * dedx,
DTYPE * m, DTYPE s, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
dedx[i] = dedy[i] * m[i] * s;
}
}
/*
backward computation of dropout function (Cuda version)
dE/dx = dE/dy * dy/dx
>> y - output of the dropout function
>> x - input of the dropout function
>> dedy - dE/dy
>> dedx - dE/dx
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
void _CudaDropoutBackward(const XTensor * y, const XTensor * x,
const XTensor * dedy, XTensor * dedx,
const XTensor * mask, DTYPE scaleFactor)
{
int gridSize[3], blockSize[3];
if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
/* dE/ds = dE/dy * dy/ds */
KernelDropoutBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
((DTYPE*)dedy->data, (DTYPE*)dedx->data,
(DTYPE*)mask->data, scaleFactor, x->unitNum);
BacktoCudaDev(x->devID, devIDBackup);
}
else
ShowNTErrors("TODO!");
}
#endif
} // namespace nts(NiuTrans.Tensor) |
9414b5fbaa09e9cfe1ed3d934294f1218e19d204.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdbool.h>
#include <assert.h>
typedef struct
{
char name[40];
int n_atoms;
int n_bonds;
double atoms[500];
int bonds[300];
}molecola;
typedef struct
{
int head;
int tail;
int elements[500];
}queue;
#define repetitions 10
#define enable_refiniment false
#define high_precision_step 1
#define low_precision_step 30
#define threshold 0.2
inline hipError_t checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
void create_molecola(char* molecola_name,molecola* m1) {
FILE *input_file;
int line_index = 0;
char* res;
char line[500];
int number_of_atoms;
int number_of_bounds;
char path[50];
strcpy(path,"molecules/");
strcat(path, molecola_name);
input_file = fopen(path, "r");
if (input_file == NULL) {
		printf("fopen failed\n");
return;
}
res = fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
char* numero = strtok(line, " ");
number_of_atoms = atoi(numero);
numero = strtok(NULL, " ");
number_of_bounds = atoi(numero);
m1->n_atoms = number_of_atoms;
m1->n_bonds = number_of_bounds;
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
while(1){
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
token = strtok(NULL, " ");
m1->atoms[3*line_index] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index+1] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index + 2] = atof(token);
fgets(line,100,input_file);
if(strncmp(line,"@<TRIPOS>",5)==0){
break;
}
}
fgets(line, 100, input_file);
while (strcmp(line, "@<TRIPOS>SUBSTRUCTURE\n") != 0 && res != NULL && strcmp(res,"\n")!=0) {
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
m1->bonds[2*line_index] = atoi(token);
token = strtok(NULL, " ");
m1->bonds[2*line_index+1] = atoi(token);
res = fgets(line, 100, input_file);
}
fclose(input_file);
strcpy(m1->name,molecola_name);
}
bool isPresent(int node, queue* queue){
for (int i = 0; i < queue->tail; i++) {
if (queue->elements[i] == node) {
return true; }
}
return false;
}
void bfs(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
void bfs2(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
void find_adjacent_nodes(molecola* molecola, queue* queue, int atom, int bond_index) {
queue->elements[0] = atom;
queue->head = 0;
queue->tail = 1;
bfs2(bond_index, molecola, queue);
}
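// A bond is a rotatable bond (rotamer) when removing it splits the molecule into two
// disjoint fragments: the BFS traversals grown from its two endpoints (with the bond
// itself excluded) must never reach a common atom, and neither fragment may consist of
// a single atom.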
bool isRotamer(int bond_index, molecola* molecola) {
int first_node, second_node;
bool isRotamer;
queue q1;
queue q2;
first_node = molecola->bonds[2*bond_index];
second_node = molecola->bonds[2*bond_index+1];
q1.tail = 1;
q1.head = 0;
q1.elements[0] = first_node;
q2.tail = 1;
q2.head = 0;
q2.elements[0] = second_node;
bfs(bond_index, molecola, &q1);
bfs(bond_index, molecola, &q2);
isRotamer = true;
for (int i = 0; i < q1.tail; i++) {
for (int j = 0; j < q2.tail; j++) {
if (q1.elements[i] == q2.elements[j]){
isRotamer = false;
}
}
}
if (q1.tail == 1 || q2.tail == 1) {
isRotamer = false;
}
return isRotamer;
}
int* find_rotamers(molecola* molecola, int* number_of_rotamers) {
	// always call this with n_rotamers initialized to 0
int size = molecola->n_bonds;
bool* x;
int n_rotamers = 0;
int* rotamer_list;
int rotamer_index = 0;
x = (bool*)malloc(size* sizeof(int));
for (int i = 0; i < size; i++) {
if (isRotamer(i, molecola)) { x[i] = true; }
else { x[i] = false; }
}
for (int i = 0; i < size; i++) {
if (x[i]) {
n_rotamers += 1;
}
}
rotamer_list = (int*)malloc(n_rotamers * sizeof(int));
for (int i = 0; i < size; i++) {
if (x[i]) {
rotamer_list[rotamer_index] = i;
rotamer_index += 1;
}
}
free(x);
*number_of_rotamers = n_rotamers;
return rotamer_list;
}
void normalise(double* x, double* y, double* z) {
double w = sqrt(*x * *x + *y * *y + *z * *z);
*x = *x / w;
*y = *y / w;
*z = *z / w;
}
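// rotate_atom rotates a single atom about the bond (rotamer) axis using Rodrigues'
// rotation formula: translate so the first bond endpoint p1 sits at the origin, apply
// the axis-angle rotation matrix for the normalised axis r = p2 - p1 and `angle`
// degrees, then translate back.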
void rotate_atom(molecola*molecola, int atom, int rotamer, int angle) {
double px, py, pz, p1x, p1y, p1z, p2x, p2y, p2z, rx, ry, rz, qx, qy, qz;
double tetha = angle*M_PI / 180;
int rotamer1_index, rotamer2_index;
double costheta, sintheta;
px = molecola->atoms[3 * (atom - 1)];
py = molecola->atoms[3 * (atom - 1) + 1];
pz = molecola->atoms[3 * (atom - 1) + 2];
rotamer1_index = molecola->bonds[2 * rotamer];
rotamer2_index = molecola->bonds[2 * rotamer + 1];
p1x = molecola->atoms[3 * (rotamer1_index - 1)];
p1y = molecola->atoms[3 * (rotamer1_index - 1) + 1];
p1z = molecola->atoms[3 * (rotamer1_index - 1) + 2];
p2x = molecola->atoms[3 * (rotamer2_index - 1)];
p2y = molecola->atoms[3 * (rotamer2_index - 1) + 1];
p2z = molecola->atoms[3 * (rotamer2_index - 1) + 2];
rx = p2x - p1x;
ry = p2y - p1y;
rz = p2z - p1z;
px = px - p1x;
py = py - p1y;
pz = pz - p1z;
normalise(&rx, &ry, &rz);
costheta = cos(tetha);
sintheta = sin(tetha);
qx = 0;
qy = 0;
qz = 0;
qx += (costheta + (1 - costheta)* rx*rx)*px;
qx += ((1 - costheta) * rx * ry - rz * sintheta) * py;
qx += ((1 - costheta) * rx * rz + ry * sintheta) * pz;
qy += ((1 - costheta) * rx * ry + rz * sintheta) * px;
qy += (costheta + (1 - costheta) * ry * ry) * py;
qy += ((1 - costheta) * ry * rz - rx * sintheta) * pz;
qz += ((1 - costheta) * rx * rz - ry * sintheta) * px;
qz += ((1 - costheta) * ry * rz + rx * sintheta) * py;
qz += (costheta + (1 - costheta) * rz * rz) * pz;
qx += p1x;
qy += p1y;
qz += p1z;
molecola->atoms[3 * (atom - 1)] = qx;
molecola->atoms[3 * (atom - 1) + 1] = qy;
molecola->atoms[3 * (atom - 1) + 2] = qz;
}
__host__ __device__ double distance(molecola* molecola, int index_1, int index_2) {
double distance;
double x1, y1, z1, x2, y2, z2;
x1 = molecola->atoms[3 * (index_1)];
y1 = molecola->atoms[3 * (index_1) + 1];
z1 = molecola->atoms[3 * (index_1) + 2];
x2 = molecola->atoms[3 * (index_2)];
y2 = molecola->atoms[3 * (index_2) + 1];
z2 = molecola->atoms[3 * (index_2) + 2];
distance = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
return distance;
}
double measure_expansion(molecola* molecola) {
double expansion = 0;
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
expansion += distance(molecola, i, j);
}
}
}
return expansion;
}
void rotate_molecola(molecola* molecola, int rotamer, int atom, int angle) {
queue q1;
find_adjacent_nodes(molecola, &q1, atom, rotamer);
for (int i = 0; i < q1.tail; i++) {
rotate_atom(molecola, q1.elements[i], rotamer, angle);
}
}
__global__ void is_ligand_feasible(molecola* molecola, bool* isFeasible) {
__shared__ bool feasibility[200];
int thread_id = threadIdx.x + blockIdx.x*blockDim.x;
	if(thread_id < molecola->n_atoms){
		// assume this atom is clash-free until a contact closer than 0.8 is found
		feasibility[thread_id] = true;
		for(int i = thread_id+1; i<molecola->n_atoms; i++){
			if (distance(molecola,thread_id,i) < 0.8){
				feasibility[thread_id] = false;
				break;
			}
		}
	}
	// make every thread's flag visible before thread 0 reduces them
	__syncthreads();
if(thread_id == 0){
*isFeasible = true;
for(int i = 0; i< molecola->n_atoms;i++){
if(feasibility[i] == false){
*isFeasible = false;
}
}
}
}
double fragment_size(molecola* molecola, int bond, int index) {
	// index 1 selects the left fragment, 2 the right
queue q1;
q1.tail = 1;
q1.head = 0;
double size_pct;
if (index == 1) {
q1.elements[0] = molecola->bonds[2 * bond];
}
else if (index == 2) {
q1.elements[0] = molecola->bonds[2 * bond + 1];
}
else {
printf("Fragment size: Index must be between 1 and 2");
return 0;
}
bfs(bond, molecola, &q1);
size_pct = (double)q1.tail / molecola->n_atoms;
return size_pct;
}
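// place_in_best_angle sweeps the fragment attached to `atom` around the bond in
// `step`-degree increments over [min_range, max_range], keeps the angle that gives the
// largest expansion among clash-free (feasible) conformations, and finally rotates the
// molecule to that best angle.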
void place_in_best_angle(molecola* mol, int bond, int atom, int step, int min_range, int max_range) {
int best_expansion_angle;
double best_expansion=0;
double expansion;
best_expansion_angle = 0;
bool* isLigandFeasible;
checkCuda(hipMallocManaged(&isLigandFeasible,sizeof(bool)));
for (int i = min_range; i <= max_range; i += step) {
rotate_molecola(mol, bond, atom, step);
hipLaunchKernelGGL(( is_ligand_feasible), dim3(1),dim3(200), 0, 0, mol,isLigandFeasible);
checkCuda(hipDeviceSynchronize());
if (*isLigandFeasible) {
expansion = measure_expansion(mol);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_angle = i;
}
}
}
checkCuda(hipFree(isLigandFeasible));
rotate_molecola(mol, bond, atom, (360-max_range) + best_expansion_angle);
}
int find_best_tile(molecola* mol, int n_tiles, int bond, int atom) {
	// check the loop bounds; try to use an n_tiles value that divides 360 evenly
molecola mol2;
mol2 = *mol;
int tile_size;
int index;
double expansion;
double best_expansion=0;
int best_expansion_tile=0;
bool* isLigandFeasible;
checkCuda(hipMallocManaged(&isLigandFeasible,sizeof(bool)));
tile_size = floor(360 / n_tiles);
rotate_molecola(&mol2, bond, atom, tile_size / 2);
hipLaunchKernelGGL(( is_ligand_feasible), dim3(1),dim3(200), 0, 0, mol,isLigandFeasible);
checkCuda(hipDeviceSynchronize());
if (*isLigandFeasible) {
best_expansion = measure_expansion(&mol2);
best_expansion_tile = 0;
}
for (int i = 1; i < n_tiles; i++) {
index = tile_size / 2;
rotate_molecola(&mol2, bond, atom, tile_size);
hipLaunchKernelGGL(( is_ligand_feasible), dim3(1),dim3(200), 0, 0, mol,isLigandFeasible);
checkCuda(hipDeviceSynchronize());
if (*isLigandFeasible) {
expansion = measure_expansion(&mol2);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_tile = i;
}
}
}
checkCuda(hipFree(isLigandFeasible));
return best_expansion_tile;
}
double match_probe_shape(molecola* molecola) {
int* rotamer_list;
int rotamer_index = 0;
int n_rotamers = 0;
int best_tile;
int n_tiles = 18;
int tile_size = 360/n_tiles;
	// not sure this is correct
rotamer_list = find_rotamers(molecola, &n_rotamers);
for (int j = 0; j < repetitions; j++) {
for (int i = 0; i < n_rotamers; i++) {
rotamer_index = rotamer_list[i];
if (fragment_size(molecola, rotamer_index, 1) < threshold) {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], low_precision_step, 0, 360);
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index]);
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
}
else {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, 0, 360);
}
}
if (fragment_size(molecola, rotamer_index, 2) < threshold) {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], low_precision_step, 0, 360);
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index + 1]);
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
}
else {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, 0, 360);
}
}
}
}
free(rotamer_list);
return measure_expansion(molecola);
}
int main() {
clock_t begin = clock();
int n_molecole = 3;
double espansion;
molecola* m1;
//molecola list_of_molecole[1];
char* molecole_list[] = {"Aspirin.mol2","Diclofenac.mol2","Diplosalsalate.mol2","Flurbiprofen.mol2","Focalin.mol2","Losmiprofen.mol2","Melatonin.mol2","Myfortic.mol2","Nifuradene.mol2","Oxybenzone.mol2","Propiomazine.mol2","Raloxifene.mol2","Relacatib.mol2", "Ribasphere.mol2","Roxoperone.mol2","Sulindac.mol2",
"1b9v_deposited_1.mol2", "1br6_deposited_1.mol2","1bxq_ligand.mol2", "1c1b_deposited_1.mol2","1ctr_deposited_1.mol2","1cvu_deposited_1.mol2","1cx2_deposited_1.mol2",
"1ezq_deposited_1.mol2", "1fcx_deposited_1.mol2", "1fl3_deposited_1.mol2", "1fm6_deposited_1.mol2","1fm9_deposited_1.mol2","1fmz_ligand.mol2","1fq5_deposited_1.mol2",
"1gvx_ligand.mol2", "1gwx_deposited_1.mol2","1h23_ligand.mol2", "1hp0_deposited_1.mol2","1hvy_deposited_1.mol2", "1iiq_ligand.mol2","1lpz_deposited_1.mol2",
"1mq6_deposited_1.mol2","1oyt_deposited_1.mol2", "1pso_deposited_1.mol2","1s19_deposited_1.mol2","1uml_deposited_1.mol2","1ydt_deposited_1.mol2","2hnx_ligand.mol2",
"3l3n_ligand.mol2", "3nhi_ligand.mol2","4djp_ligand.mol2","4gid_ligand.mol2"};
for (int i = 0; i < n_molecole; i++) {
checkCuda(hipMallocManaged(&m1, sizeof(molecola)));
create_molecola(molecole_list[i],m1);
espansion = measure_expansion(m1);
		printf("Before expansion Molecola: %s , expansion: %f\n", m1->name,espansion);
espansion = match_probe_shape(m1);
printf("Molecola: %s, expansion: %f\n", m1->name, espansion);
hipFree(m1);
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nTime spent: %f\n", time_spent);
return 0;
} | 9414b5fbaa09e9cfe1ed3d934294f1218e19d204.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <stdbool.h>
#include <assert.h>
typedef struct
{
char name[40];
int n_atoms;
int n_bonds;
double atoms[500];
int bonds[300];
}molecola;
typedef struct
{
int head;
int tail;
int elements[500];
}queue;
#define repetitions 10
#define enable_refiniment false
#define high_precision_step 1
#define low_precision_step 30
#define threshold 0.2
inline cudaError_t checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
void create_molecola(char* molecola_name,molecola* m1) {
FILE *input_file;
int line_index = 0;
char* res;
char line[500];
int number_of_atoms;
int number_of_bounds;
char path[50];
strcpy(path,"molecules/");
strcat(path, molecola_name);
input_file = fopen(path, "r");
if (input_file == NULL) {
		printf("fopen failed\n");
return;
}
res = fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
char* numero = strtok(line, " ");
number_of_atoms = atoi(numero);
numero = strtok(NULL, " ");
number_of_bounds = atoi(numero);
m1->n_atoms = number_of_atoms;
m1->n_bonds = number_of_bounds;
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
fgets(line, 100, input_file);
while(1){
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
token = strtok(NULL, " ");
m1->atoms[3*line_index] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index+1] = atof(token);
token = strtok(NULL, " ");
m1->atoms[3*line_index + 2] = atof(token);
fgets(line,100,input_file);
if(strncmp(line,"@<TRIPOS>",5)==0){
break;
}
}
fgets(line, 100, input_file);
while (strcmp(line, "@<TRIPOS>SUBSTRUCTURE\n") != 0 && res != NULL && strcmp(res,"\n")!=0) {
char * token = strtok(line, " ");
line_index = atoi(token) - 1;
token = strtok(NULL, " ");
m1->bonds[2*line_index] = atoi(token);
token = strtok(NULL, " ");
m1->bonds[2*line_index+1] = atoi(token);
res = fgets(line, 100, input_file);
}
fclose(input_file);
strcpy(m1->name,molecola_name);
}
bool isPresent(int node, queue* queue){
for (int i = 0; i < queue->tail; i++) {
if (queue->elements[i] == node) {
return true; }
}
return false;
}
void bfs(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
void bfs2(int bond_index,molecola*molecola,queue* queue){
int node = queue->elements[queue->head];
int n_bonds = molecola -> n_bonds;
while(queue->head < queue->tail){
for(int i = 0; i<n_bonds;i++){
if(i!=bond_index){
if (molecola->bonds[2 * i] == node && !isPresent(molecola->bonds[2 * i + 1], queue)) {
queue->elements[queue->tail] = molecola->bonds[2 * i + 1];
queue->tail += 1;
}
else if(molecola->bonds[2 * i+1] == node && !isPresent(molecola->bonds[2 * i], queue)){
queue->elements[queue->tail] = molecola->bonds[2 * i];
queue->tail += 1;
}
}
}
queue->head +=1;
node = queue -> elements[queue->head];
}
}
void find_adjacent_nodes(molecola* molecola, queue* queue, int atom, int bond_index) {
queue->elements[0] = atom;
queue->head = 0;
queue->tail = 1;
bfs2(bond_index, molecola, queue);
}
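// A bond is a rotatable bond (rotamer) when removing it splits the molecule into two
// disjoint fragments: the BFS traversals grown from its two endpoints (with the bond
// itself excluded) must never reach a common atom, and neither fragment may consist of
// a single atom.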
bool isRotamer(int bond_index, molecola* molecola) {
int first_node, second_node;
bool isRotamer;
queue q1;
queue q2;
first_node = molecola->bonds[2*bond_index];
second_node = molecola->bonds[2*bond_index+1];
q1.tail = 1;
q1.head = 0;
q1.elements[0] = first_node;
q2.tail = 1;
q2.head = 0;
q2.elements[0] = second_node;
bfs(bond_index, molecola, &q1);
bfs(bond_index, molecola, &q2);
isRotamer = true;
for (int i = 0; i < q1.tail; i++) {
for (int j = 0; j < q2.tail; j++) {
if (q1.elements[i] == q2.elements[j]){
isRotamer = false;
}
}
}
if (q1.tail == 1 || q2.tail == 1) {
isRotamer = false;
}
return isRotamer;
}
int* find_rotamers(molecola* molecola, int* number_of_rotamers) {
	// always call this with n_rotamers initialized to 0
int size = molecola->n_bonds;
bool* x;
int n_rotamers = 0;
int* rotamer_list;
int rotamer_index = 0;
x = (bool*)malloc(size* sizeof(int));
for (int i = 0; i < size; i++) {
if (isRotamer(i, molecola)) { x[i] = true; }
else { x[i] = false; }
}
for (int i = 0; i < size; i++) {
if (x[i]) {
n_rotamers += 1;
}
}
rotamer_list = (int*)malloc(n_rotamers * sizeof(int));
for (int i = 0; i < size; i++) {
if (x[i]) {
rotamer_list[rotamer_index] = i;
rotamer_index += 1;
}
}
free(x);
*number_of_rotamers = n_rotamers;
return rotamer_list;
}
void normalise(double* x, double* y, double* z) {
double w = sqrt(*x * *x + *y * *y + *z * *z);
*x = *x / w;
*y = *y / w;
*z = *z / w;
}
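// rotate_atom rotates a single atom about the bond (rotamer) axis using Rodrigues'
// rotation formula: translate so the first bond endpoint p1 sits at the origin, apply
// the axis-angle rotation matrix for the normalised axis r = p2 - p1 and `angle`
// degrees, then translate back.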
void rotate_atom(molecola*molecola, int atom, int rotamer, int angle) {
double px, py, pz, p1x, p1y, p1z, p2x, p2y, p2z, rx, ry, rz, qx, qy, qz;
double tetha = angle*M_PI / 180;
int rotamer1_index, rotamer2_index;
double costheta, sintheta;
px = molecola->atoms[3 * (atom - 1)];
py = molecola->atoms[3 * (atom - 1) + 1];
pz = molecola->atoms[3 * (atom - 1) + 2];
rotamer1_index = molecola->bonds[2 * rotamer];
rotamer2_index = molecola->bonds[2 * rotamer + 1];
p1x = molecola->atoms[3 * (rotamer1_index - 1)];
p1y = molecola->atoms[3 * (rotamer1_index - 1) + 1];
p1z = molecola->atoms[3 * (rotamer1_index - 1) + 2];
p2x = molecola->atoms[3 * (rotamer2_index - 1)];
p2y = molecola->atoms[3 * (rotamer2_index - 1) + 1];
p2z = molecola->atoms[3 * (rotamer2_index - 1) + 2];
rx = p2x - p1x;
ry = p2y - p1y;
rz = p2z - p1z;
px = px - p1x;
py = py - p1y;
pz = pz - p1z;
normalise(&rx, &ry, &rz);
costheta = cos(tetha);
sintheta = sin(tetha);
qx = 0;
qy = 0;
qz = 0;
qx += (costheta + (1 - costheta)* rx*rx)*px;
qx += ((1 - costheta) * rx * ry - rz * sintheta) * py;
qx += ((1 - costheta) * rx * rz + ry * sintheta) * pz;
qy += ((1 - costheta) * rx * ry + rz * sintheta) * px;
qy += (costheta + (1 - costheta) * ry * ry) * py;
qy += ((1 - costheta) * ry * rz - rx * sintheta) * pz;
qz += ((1 - costheta) * rx * rz - ry * sintheta) * px;
qz += ((1 - costheta) * ry * rz + rx * sintheta) * py;
qz += (costheta + (1 - costheta) * rz * rz) * pz;
qx += p1x;
qy += p1y;
qz += p1z;
molecola->atoms[3 * (atom - 1)] = qx;
molecola->atoms[3 * (atom - 1) + 1] = qy;
molecola->atoms[3 * (atom - 1) + 2] = qz;
}
__host__ __device__ double distance(molecola* molecola, int index_1, int index_2) {
double distance;
double x1, y1, z1, x2, y2, z2;
x1 = molecola->atoms[3 * (index_1)];
y1 = molecola->atoms[3 * (index_1) + 1];
z1 = molecola->atoms[3 * (index_1) + 2];
x2 = molecola->atoms[3 * (index_2)];
y2 = molecola->atoms[3 * (index_2) + 1];
z2 = molecola->atoms[3 * (index_2) + 2];
distance = sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2) + pow(z1 - z2, 2));
return distance;
}
double measure_expansion(molecola* molecola) {
double expansion = 0;
for (int i = 0; i < molecola->n_atoms; i++) {
for (int j = 0; j < molecola->n_atoms; j++) {
if (j > i) {
expansion += distance(molecola, i, j);
}
}
}
return expansion;
}
void rotate_molecola(molecola* molecola, int rotamer, int atom, int angle) {
queue q1;
find_adjacent_nodes(molecola, &q1, atom, rotamer);
for (int i = 0; i < q1.tail; i++) {
rotate_atom(molecola, q1.elements[i], rotamer, angle);
}
}
__global__ void is_ligand_feasible(molecola* molecola, bool* isFeasible) {
__shared__ bool feasibility[200];
int thread_id = threadIdx.x + blockIdx.x*blockDim.x;
	if(thread_id < molecola->n_atoms){
		// assume this atom is clash-free until a contact closer than 0.8 is found
		feasibility[thread_id] = true;
		for(int i = thread_id+1; i<molecola->n_atoms; i++){
			if (distance(molecola,thread_id,i) < 0.8){
				feasibility[thread_id] = false;
				break;
			}
		}
	}
	// make every thread's flag visible before thread 0 reduces them
	__syncthreads();
if(thread_id == 0){
*isFeasible = true;
for(int i = 0; i< molecola->n_atoms;i++){
if(feasibility[i] == false){
*isFeasible = false;
}
}
}
}
double fragment_size(molecola* molecola, int bond, int index) {
	// index 1 selects the left fragment, 2 the right
queue q1;
q1.tail = 1;
q1.head = 0;
double size_pct;
if (index == 1) {
q1.elements[0] = molecola->bonds[2 * bond];
}
else if (index == 2) {
q1.elements[0] = molecola->bonds[2 * bond + 1];
}
else {
printf("Fragment size: Index must be between 1 and 2");
return 0;
}
bfs(bond, molecola, &q1);
size_pct = (double)q1.tail / molecola->n_atoms;
return size_pct;
}
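// place_in_best_angle sweeps the fragment attached to `atom` around the bond in
// `step`-degree increments over [min_range, max_range], keeps the angle that gives the
// largest expansion among clash-free (feasible) conformations, and finally rotates the
// molecule to that best angle.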
void place_in_best_angle(molecola* mol, int bond, int atom, int step, int min_range, int max_range) {
int best_expansion_angle;
double best_expansion=0;
double expansion;
best_expansion_angle = 0;
bool* isLigandFeasible;
checkCuda(cudaMallocManaged(&isLigandFeasible,sizeof(bool)));
for (int i = min_range; i <= max_range; i += step) {
rotate_molecola(mol, bond, atom, step);
is_ligand_feasible<<<1,200>>>(mol,isLigandFeasible);
checkCuda(cudaDeviceSynchronize());
if (*isLigandFeasible) {
expansion = measure_expansion(mol);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_angle = i;
}
}
}
checkCuda(cudaFree(isLigandFeasible));
rotate_molecola(mol, bond, atom, (360-max_range) + best_expansion_angle);
}
int find_best_tile(molecola* mol, int n_tiles, int bond, int atom) {
	// check the loop bounds; try to use an n_tiles value that divides 360 evenly
molecola mol2;
mol2 = *mol;
int tile_size;
int index;
double expansion;
double best_expansion=0;
int best_expansion_tile=0;
bool* isLigandFeasible;
checkCuda(cudaMallocManaged(&isLigandFeasible,sizeof(bool)));
tile_size = floor(360 / n_tiles);
rotate_molecola(&mol2, bond, atom, tile_size / 2);
is_ligand_feasible<<<1,200>>>(mol,isLigandFeasible);
checkCuda(cudaDeviceSynchronize());
if (*isLigandFeasible) {
best_expansion = measure_expansion(&mol2);
best_expansion_tile = 0;
}
for (int i = 1; i < n_tiles; i++) {
index = tile_size / 2;
rotate_molecola(&mol2, bond, atom, tile_size);
is_ligand_feasible<<<1,200>>>(mol,isLigandFeasible);
checkCuda(cudaDeviceSynchronize());
if (*isLigandFeasible) {
expansion = measure_expansion(&mol2);
if (expansion > best_expansion) {
best_expansion = expansion;
best_expansion_tile = i;
}
}
}
checkCuda(cudaFree(isLigandFeasible));
return best_expansion_tile;
}
double match_probe_shape(molecola* molecola) {
int* rotamer_list;
int rotamer_index = 0;
int n_rotamers = 0;
int best_tile;
int n_tiles = 18;
int tile_size = 360/n_tiles;
	// not sure this is correct
rotamer_list = find_rotamers(molecola, &n_rotamers);
for (int j = 0; j < repetitions; j++) {
for (int i = 0; i < n_rotamers; i++) {
rotamer_index = rotamer_list[i];
if (fragment_size(molecola, rotamer_index, 1) < threshold) {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], low_precision_step, 0, 360);
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index]);
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
}
else {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index], high_precision_step, 0, 360);
}
}
if (fragment_size(molecola, rotamer_index, 2) < threshold) {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], low_precision_step, 0, 360);
}
else {
if (enable_refiniment) {
best_tile = find_best_tile(molecola, n_tiles, rotamer_index, molecola->bonds[2 * rotamer_index + 1]);
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, best_tile*tile_size, (best_tile + 1)*tile_size);
}
else {
place_in_best_angle(molecola, rotamer_index, molecola->bonds[2 * rotamer_index + 1], high_precision_step, 0, 360);
}
}
}
}
free(rotamer_list);
return measure_expansion(molecola);
}
int main() {
clock_t begin = clock();
int n_molecole = 3;
double espansion;
molecola* m1;
//molecola list_of_molecole[1];
char* molecole_list[] = {"Aspirin.mol2","Diclofenac.mol2","Diplosalsalate.mol2","Flurbiprofen.mol2","Focalin.mol2","Losmiprofen.mol2","Melatonin.mol2","Myfortic.mol2","Nifuradene.mol2","Oxybenzone.mol2","Propiomazine.mol2","Raloxifene.mol2","Relacatib.mol2", "Ribasphere.mol2","Roxoperone.mol2","Sulindac.mol2",
"1b9v_deposited_1.mol2", "1br6_deposited_1.mol2","1bxq_ligand.mol2", "1c1b_deposited_1.mol2","1ctr_deposited_1.mol2","1cvu_deposited_1.mol2","1cx2_deposited_1.mol2",
"1ezq_deposited_1.mol2", "1fcx_deposited_1.mol2", "1fl3_deposited_1.mol2", "1fm6_deposited_1.mol2","1fm9_deposited_1.mol2","1fmz_ligand.mol2","1fq5_deposited_1.mol2",
"1gvx_ligand.mol2", "1gwx_deposited_1.mol2","1h23_ligand.mol2", "1hp0_deposited_1.mol2","1hvy_deposited_1.mol2", "1iiq_ligand.mol2","1lpz_deposited_1.mol2",
"1mq6_deposited_1.mol2","1oyt_deposited_1.mol2", "1pso_deposited_1.mol2","1s19_deposited_1.mol2","1uml_deposited_1.mol2","1ydt_deposited_1.mol2","2hnx_ligand.mol2",
"3l3n_ligand.mol2", "3nhi_ligand.mol2","4djp_ligand.mol2","4gid_ligand.mol2"};
for (int i = 0; i < n_molecole; i++) {
checkCuda(cudaMallocManaged(&m1, sizeof(molecola)));
create_molecola(molecole_list[i],m1);
espansion = measure_expansion(m1);
		printf("Before expansion Molecola: %s , expansion: %f\n", m1->name,espansion);
espansion = match_probe_shape(m1);
printf("Molecola: %s, expansion: %f\n", m1->name, espansion);
cudaFree(m1);
}
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("\nTime spent: %f\n", time_spent);
return 0;
} |
d2f74350bf011727936119dd84465eef789b48e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <hip/hip_runtime.h>
#include "device_launch_parameters.h"
#include <assert.h>
#include <chrono>
#include "helper_cuda.h"
#include <iomanip>
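// The "naive" kernels below are serial baselines used only for benchmarking: the prefix
// dependency tooffsets[i + 1] = tooffsets[i] + (stop - start) is evaluated strictly in
// order, so naivemultikernel serialises its threads with __syncthreads() inside
// 1024-element chunks and naivesinglekernel lets a single thread do all the work.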
template <typename C, typename T>
__global__
void naivemultikernel(int block, T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int idx = threadIdx.x + (block*1024);
if (idx < length) {
if (idx == 0) tooffsets[0] = 0;
for (int i = block*1024; i < ::min((int)length, (block+1)*1024); i++) {
__syncthreads();
if (i == idx) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert(start < stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
}
}
template <typename T, typename C>
int naivemulti(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int* d_tooffsets, * d_fromstarts, * d_fromstops;
hipMalloc((void**)&d_tooffsets, (length+1) * sizeof(T));
hipMalloc((void**)&d_fromstarts, length * sizeof(T));
hipMemcpy(d_fromstarts, fromstarts, length * sizeof(T), hipMemcpyHostToDevice);
hipMalloc((void**)&d_fromstops, length * sizeof(C));
hipMemcpy(d_fromstops, fromstops, length * sizeof(C), hipMemcpyHostToDevice);
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
auto start1 = std::chrono::high_resolution_clock::now();
for (int i=0; i<block; i++) {
hipLaunchKernelGGL(( naivemultikernel<T, C>), dim3(1), dim3(thread), 0, 0, i, d_tooffsets, d_fromstarts, d_fromstops, startsoffset, stopsoffset, length);
}
hipDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
hipMemcpy(tooffsets, d_tooffsets, (length + 1) * sizeof(T), hipMemcpyDeviceToHost);
hipFree(d_tooffsets);
hipFree(d_fromstarts);
hipFree(d_fromstops);
return time1.count();
}
template <typename C, typename T>
__global__
void naivesinglekernel(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx < length) {
if (idx == 0) {
for (int i = 0; i < length; i++) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert (start <= stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
}
}
template <typename T, typename C>
int naivesingle(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int* d_tooffsets, * d_fromstarts, * d_fromstops;
checkCudaErrors(hipMalloc((void**)&d_tooffsets, (length + 1) * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_fromstarts, length * sizeof(T)));
checkCudaErrors(hipMemcpy(d_fromstarts, fromstarts, length * sizeof(T), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**)&d_fromstops, length * sizeof(C)));
checkCudaErrors(hipMemcpy(d_fromstops, fromstops, length * sizeof(C), hipMemcpyHostToDevice));
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
auto start1 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( naivesinglekernel<T, C>), dim3(block), dim3(thread), sizeof(T), 0, d_tooffsets, d_fromstarts, d_fromstops, startsoffset, stopsoffset, length);
hipDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
hipDeviceSynchronize();
hipMemcpy(tooffsets, d_tooffsets, (length + 1) * sizeof(T), hipMemcpyDeviceToHost);
hipFree(d_tooffsets);
hipFree(d_fromstarts);
hipFree(d_fromstops);
return time1.count();
}
template <typename T, typename C>
__global__
void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) {
int thid = threadIdx.x + blockIdx.x * blockDim.x;
if (thid < n) {
C start = starter[thid + startsoffset];
C stop = stopper[thid + stopsoffset];
assert(start <= stop);
output[thid] = stop - start;
}
}
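// gpupar builds the offsets in two steps: the sub kernel above computes every list's
// length (stop - start) in parallel, then thrust::exclusive_scan turns those lengths
// into offsets; the final element (the total length) is appended by hand.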
template <typename T, typename C>
int gpupar(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
T* d_output;
C* d_arr, * d_arr2;
hipMalloc((void**)&d_output, length * sizeof(T));
hipMalloc((void**)&d_arr, length * sizeof(C));
hipMemcpy(d_arr, arr, length * sizeof(C), hipMemcpyHostToDevice);
hipMalloc((void**)&d_arr2, length * sizeof(C));
hipMemcpy(d_arr2, arr2, length * sizeof(C), hipMemcpyHostToDevice);
auto start1 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( sub<T, C>) , dim3(block), dim3(thread), 0, 0, d_output, d_arr, d_arr2, startsoffset, stopsoffset, length);
hipDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
auto start2 = std::chrono::high_resolution_clock::now();
thrust::device_vector<T> data(d_output, d_output + length);
thrust::device_vector<T> temp(data.size() + 1);
thrust::exclusive_scan(data.begin(), data.end(), temp.begin());
temp[data.size()] = data.back() + temp[data.size() - 1];
auto stop2 = std::chrono::high_resolution_clock::now();
auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
thrust::copy(temp.begin(), temp.end(), output);
hipFree(d_output);
hipFree(d_arr);
hipFree(d_arr2);
auto time = time1.count() + time2.count();
return (int)time;
}
template <typename C, typename T>
void foo(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
tooffsets[0] = 0;
for (int64_t i = 0; i < length; i++) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert(start <= stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
// https://stackoverflow.com/a/14038590/4647107
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
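// Two-level scan used by offload(): prefix_sum1 runs a block-local inclusive
// Hillis-Steele scan of the per-list lengths in double-buffered shared memory and
// writes each block's total into sums[]; prefix_sum2 scans those block totals; adder()
// then adds the preceding block's total to every element, giving the full prefix sum.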
template <typename T, typename C>
__global__
void prefix_sum1(T* base, const C* basestart, const C* basestop, int64_t basestartoffset, int64_t basestopoffset, int length, T* sums) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ T temp[];
int pout = 0, pin = 1;
if (thid < length) {
temp[threadIdx.x] = basestop[basestopoffset + thid] - basestart[basestartoffset + thid];
__syncthreads();
for (int offset = 1; offset < 1024; offset *=2) {
pout = 1 - pout;
pin = 1 - pout;
if (threadIdx.x >= offset) {
temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x - offset] + temp[pin*1024 + threadIdx.x];
}
else {
temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x];
}
__syncthreads();
}
base[thid] = temp[pout*1024 + threadIdx.x];
__syncthreads();
if ((thid == 1023) || ((blockIdx.x != 0) && (thid == ((1024 * (blockIdx.x + 1))-1))) || (thid == length-1)) {
sums[blockIdx.x] = base[thid];
}
}
}
// Need another kernel because of conditional __syncthreads()
template <typename T>
__global__
void prefix_sum2(T* base, int length) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ T temp[];
int pout = 0, pin = 1;
if (thid < length) {
temp[thid] = base[thid];
__syncthreads();
for (int offset = 1; offset < length; offset *=2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset)
temp[pout*length + thid] = temp[pin*length + thid - offset] + temp[pin*length + thid];
else
temp[pout*length + thid] = temp[pin*length + thid];
__syncthreads();
}
base[thid] = temp[pout*length + thid];
}
}
template<typename T>
__global__
void adder(T* base, T* sums, int64_t length) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
if (blockIdx.x != 0 && thid < length)
base[thid] += sums[blockIdx.x - 1];
}
template <typename T, typename C>
int offload(T* base, C* basestart1, C* basestop1, int64_t basestartoffset, int64_t basestopoffset, int64_t length) {
int block, thread=1024;
if (length > 1024) {
if (length%1024 != 0)
block = (length / 1024) + 1;
else
block = length/1024;
}
else {
block = 1;
}
int modlength = block*thread;
// Padding the input arrays
C basestart[modlength], basestop[modlength];
for (int i=0; i<modlength; i++) {
if (i<length){
basestart[i] = basestart1[i];
basestop[i] = basestop1[i];
}
else {
basestart[i] = 0;
basestop[i] = 0;
}
}
T* d_tooffsets, * d_sums;
C* d_fromstarts, * d_fromstops;
gpuErrchk(hipMalloc((void**)&d_tooffsets, (modlength+1) * sizeof(T)));
gpuErrchk(hipMalloc((void**)&d_fromstarts, modlength * sizeof(C)));
gpuErrchk(hipMemcpy(d_fromstarts, basestart, modlength * sizeof(C), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_fromstops, modlength * sizeof(C)));
gpuErrchk(hipMemcpy(d_fromstops, basestop, modlength * sizeof(C), hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_sums, block*sizeof(T)));
auto start1 = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( prefix_sum1<T, C>), dim3(block), dim3(thread), thread*2*sizeof(T), 0, d_tooffsets, d_fromstarts, d_fromstops, basestartoffset, basestopoffset, modlength, d_sums);
hipDeviceSynchronize();
hipLaunchKernelGGL(( prefix_sum2<T>), dim3(1), dim3(block), block*2*sizeof(T), 0, d_sums, block);
hipDeviceSynchronize();
hipLaunchKernelGGL(( adder<T>), dim3(block), dim3(thread), 0, 0, d_tooffsets, d_sums, modlength);
hipDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
gpuErrchk(hipMemcpy(base, d_tooffsets, (length + 1) * sizeof(T), hipMemcpyDeviceToHost));
base[length] = base[length - 1] + basestop[length - 1 + basestopoffset] - basestart[length - 1 + basestartoffset];
gpuErrchk(hipFree(d_tooffsets));
gpuErrchk(hipFree(d_fromstarts));
gpuErrchk(hipFree(d_fromstops));
gpuErrchk(hipFree(d_sums));
auto time = time1.count();
return (int)time;
}
int main() {
// Warm up GPU
const int t_size = 1024;
int t_starter[t_size], t_stopper[t_size], t_output[t_size + 1];
for (int i = 0; i < t_size; i++) {
t_starter[i] = i;
t_stopper[i] = i + 1;
}
int throwaway = gpupar<int, int>(t_output, t_starter, t_stopper, 0, 0, t_size);
// -----------------------------------------------------------
std::ofstream outfile;
outfile.open("data.txt");
for (int counter=10; counter<400000; counter+=1000) {
const int size = counter;
std::cout << "Benchmark for array of size " << counter << "\n";
outfile << counter;
int starter[size], stopper[size], output[size + 1];
for (int i = 0; i < size; i++) {
starter[i] = i;
stopper[i] = i + 1;
}
int tot = 0;
double time = 0.00;
for (int i = 0; i < 5; i++) {
tot = tot + gpupar<int, int>(output, starter, stopper, 0, 0, size);
}
time = ((double)tot)/5;
std::cout << "Time taken for final GPU Thrust algo = " << std::fixed << std::setprecision(1) << time << "\n";
outfile << " " << std::fixed << std::setprecision(1) << time;
tot = 0;
for (int i=0; i<5; i++) {
tot = tot + offload<int, int>(output, starter, stopper, 0, 0, size);
}
time = ((double)tot)/5;
std::cout << "Time taken for final Hillis Steele algo = " << std::fixed << std::setprecision(1) << time << "\n";
outfile << " " << std::fixed << std::setprecision(1) << time << "\n";
}
return 0;
}
| d2f74350bf011727936119dd84465eef789b48e9.cu | #include <iostream>
#include <fstream>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <assert.h>
#include <chrono>
#include "helper_cuda.h"
#include <iomanip>
template <typename C, typename T>
__global__
void naivemultikernel(int block, T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int idx = threadIdx.x + (block*1024);
if (idx < length) {
if (idx == 0) tooffsets[0] = 0;
for (int i = block*1024; i < std::min((int)length, (block+1)*1024); i++) {
__syncthreads();
if (i == idx) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert(start < stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
}
}
template <typename T, typename C>
int naivemulti(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int* d_tooffsets, * d_fromstarts, * d_fromstops;
cudaMalloc((void**)&d_tooffsets, (length+1) * sizeof(T));
cudaMalloc((void**)&d_fromstarts, length * sizeof(T));
cudaMemcpy(d_fromstarts, fromstarts, length * sizeof(T), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_fromstops, length * sizeof(C));
cudaMemcpy(d_fromstops, fromstops, length * sizeof(C), cudaMemcpyHostToDevice);
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
auto start1 = std::chrono::high_resolution_clock::now();
for (int i=0; i<block; i++) {
naivemultikernel<T, C><<<1, thread>>>(i, d_tooffsets, d_fromstarts, d_fromstops, startsoffset, stopsoffset, length);
}
cudaDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
cudaMemcpy(tooffsets, d_tooffsets, (length + 1) * sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(d_tooffsets);
cudaFree(d_fromstarts);
cudaFree(d_fromstops);
return time1.count();
}
template <typename C, typename T>
__global__
void naivesinglekernel(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int idx = threadIdx.x + (blockIdx.x * blockDim.x);
if (idx < length) {
if (idx == 0) {
for (int i = 0; i < length; i++) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert (start <= stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
}
}
template <typename T, typename C>
int naivesingle(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int* d_tooffsets, * d_fromstarts, * d_fromstops;
checkCudaErrors(cudaMalloc((void**)&d_tooffsets, (length + 1) * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_fromstarts, length * sizeof(T)));
checkCudaErrors(cudaMemcpy(d_fromstarts, fromstarts, length * sizeof(T), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**)&d_fromstops, length * sizeof(C)));
checkCudaErrors(cudaMemcpy(d_fromstops, fromstops, length * sizeof(C), cudaMemcpyHostToDevice));
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
auto start1 = std::chrono::high_resolution_clock::now();
naivesinglekernel<T, C><<<block, thread, sizeof(T)>>>(d_tooffsets, d_fromstarts, d_fromstops, startsoffset, stopsoffset, length);
cudaDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
cudaDeviceSynchronize();
cudaMemcpy(tooffsets, d_tooffsets, (length + 1) * sizeof(T), cudaMemcpyDeviceToHost);
cudaFree(d_tooffsets);
cudaFree(d_fromstarts);
cudaFree(d_fromstops);
return time1.count();
}
template <typename T, typename C>
__global__
void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) {
int thid = threadIdx.x + blockIdx.x * blockDim.x;
if (thid < n) {
C start = starter[thid + startsoffset];
C stop = stopper[thid + stopsoffset];
assert(start <= stop);
output[thid] = stop - start;
}
}
template <typename T, typename C>
int gpupar(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
T* d_output;
C* d_arr, * d_arr2;
cudaMalloc((void**)&d_output, length * sizeof(T));
cudaMalloc((void**)&d_arr, length * sizeof(C));
cudaMemcpy(d_arr, arr, length * sizeof(C), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_arr2, length * sizeof(C));
cudaMemcpy(d_arr2, arr2, length * sizeof(C), cudaMemcpyHostToDevice);
auto start1 = std::chrono::high_resolution_clock::now();
sub<T, C> <<<block, thread>>>(d_output, d_arr, d_arr2, startsoffset, stopsoffset, length);
cudaDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
auto start2 = std::chrono::high_resolution_clock::now();
thrust::device_vector<T> data(d_output, d_output + length);
thrust::device_vector<T> temp(data.size() + 1);
thrust::exclusive_scan(data.begin(), data.end(), temp.begin());
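    // The exclusive scan drops the grand total, so the next line appends it, giving temp the
    // same (length + 1)-entry offsets layout the other implementations produce.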
temp[data.size()] = data.back() + temp[data.size() - 1];
auto stop2 = std::chrono::high_resolution_clock::now();
  auto time2 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop2 - start2);
thrust::copy(temp.begin(), temp.end(), output);
cudaFree(d_output);
cudaFree(d_arr);
cudaFree(d_arr2);
auto time = time1.count() + time2.count();
return (int)time;
}
template <typename C, typename T>
void foo(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
tooffsets[0] = 0;
for (int64_t i = 0; i < length; i++) {
C start = fromstarts[startsoffset + i];
C stop = fromstops[stopsoffset + i];
assert(start <= stop);
tooffsets[i + 1] = tooffsets[i] + (stop - start);
}
}
// https://stackoverflow.com/a/14038590/4647107
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template <typename T, typename C>
__global__
void prefix_sum1(T* base, const C* basestart, const C* basestop, int64_t basestartoffset, int64_t basestopoffset, int length, T* sums) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ T temp[];
int pout = 0, pin = 1;
if (thid < length) {
temp[threadIdx.x] = basestop[basestopoffset + thid] - basestart[basestartoffset + thid];
__syncthreads();
for (int offset = 1; offset < 1024; offset *=2) {
pout = 1 - pout;
pin = 1 - pout;
if (threadIdx.x >= offset) {
temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x - offset] + temp[pin*1024 + threadIdx.x];
}
else {
temp[pout*1024 + threadIdx.x] = temp[pin*1024 + threadIdx.x];
}
__syncthreads();
}
base[thid] = temp[pout*1024 + threadIdx.x];
__syncthreads();
if ((thid == 1023) || ((blockIdx.x != 0) && (thid == ((1024 * (blockIdx.x + 1))-1))) || (thid == length-1)) {
sums[blockIdx.x] = base[thid];
}
}
}
// Need another kernel because of conditional __syncthreads()
template <typename T>
__global__
void prefix_sum2(T* base, int length) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
extern __shared__ T temp[];
int pout = 0, pin = 1;
if (thid < length) {
temp[thid] = base[thid];
__syncthreads();
for (int offset = 1; offset < length; offset *=2) {
pout = 1 - pout;
pin = 1 - pout;
if (thid >= offset)
temp[pout*length + thid] = temp[pin*length + thid - offset] + temp[pin*length + thid];
else
temp[pout*length + thid] = temp[pin*length + thid];
__syncthreads();
}
base[thid] = temp[pout*length + thid];
}
}
template<typename T>
__global__
void adder(T* base, T* sums, int64_t length) {
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
if (blockIdx.x != 0 && thid < length)
base[thid] += sums[blockIdx.x - 1];
}
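// How the three kernels compose (illustrative sketch of the Hillis-Steele pipeline used by
// offload() below): prefix_sum1 scans each 1024-element block in shared memory and records the
// block's last value in sums[], prefix_sum2 scans sums[] with a single block, and adder then
// shifts every element of block b (b > 0) by sums[b - 1], turning the per-block scans into a
// scan of the whole padded array.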
template <typename T, typename C>
int offload(T* base, C* basestart1, C* basestop1, int64_t basestartoffset, int64_t basestopoffset, int64_t length) {
int block, thread=1024;
if (length > 1024) {
if (length%1024 != 0)
block = (length / 1024) + 1;
else
block = length/1024;
}
else {
block = 1;
}
int modlength = block*thread;
// Padding the input arrays
C basestart[modlength], basestop[modlength];
for (int i=0; i<modlength; i++) {
if (i<length){
basestart[i] = basestart1[i];
basestop[i] = basestop1[i];
}
else {
basestart[i] = 0;
basestop[i] = 0;
}
}
T* d_tooffsets, * d_sums;
C* d_fromstarts, * d_fromstops;
gpuErrchk(cudaMalloc((void**)&d_tooffsets, (modlength+1) * sizeof(T)));
gpuErrchk(cudaMalloc((void**)&d_fromstarts, modlength * sizeof(C)));
gpuErrchk(cudaMemcpy(d_fromstarts, basestart, modlength * sizeof(C), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_fromstops, modlength * sizeof(C)));
gpuErrchk(cudaMemcpy(d_fromstops, basestop, modlength * sizeof(C), cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_sums, block*sizeof(T)));
auto start1 = std::chrono::high_resolution_clock::now();
prefix_sum1<T, C><<<block, thread, thread*2*sizeof(T)>>>(d_tooffsets, d_fromstarts, d_fromstops, basestartoffset, basestopoffset, modlength, d_sums);
cudaDeviceSynchronize();
prefix_sum2<T><<<1, block, block*2*sizeof(T)>>>(d_sums, block);
cudaDeviceSynchronize();
adder<T><<<block, thread>>>(d_tooffsets, d_sums, modlength);
cudaDeviceSynchronize();
auto stop1 = std::chrono::high_resolution_clock::now();
auto time1 = std::chrono::duration_cast<std::chrono::nanoseconds>(stop1 - start1);
gpuErrchk(cudaMemcpy(base, d_tooffsets, (length + 1) * sizeof(T), cudaMemcpyDeviceToHost));
base[length] = base[length - 1] + basestop[length - 1 + basestopoffset] - basestart[length - 1 + basestartoffset];
gpuErrchk(cudaFree(d_tooffsets));
gpuErrchk(cudaFree(d_fromstarts));
gpuErrchk(cudaFree(d_fromstops));
gpuErrchk(cudaFree(d_sums));
auto time = time1.count();
return (int)time;
}
int main() {
// Warm up GPU
const int t_size = 1024;
int t_starter[t_size], t_stopper[t_size], t_output[t_size + 1];
for (int i = 0; i < t_size; i++) {
t_starter[i] = i;
t_stopper[i] = i + 1;
}
int throwaway = gpupar<int, int>(t_output, t_starter, t_stopper, 0, 0, t_size);
// -----------------------------------------------------------
std::ofstream outfile;
outfile.open("data.txt");
for (int counter=10; counter<400000; counter+=1000) {
const int size = counter;
std::cout << "Benchmark for array of size " << counter << "\n";
outfile << counter;
int starter[size], stopper[size], output[size + 1];
for (int i = 0; i < size; i++) {
starter[i] = i;
stopper[i] = i + 1;
}
int tot = 0;
double time = 0.00;
for (int i = 0; i < 5; i++) {
tot = tot + gpupar<int, int>(output, starter, stopper, 0, 0, size);
}
time = ((double)tot)/5;
std::cout << "Time taken for final GPU Thrust algo = " << std::fixed << std::setprecision(1) << time << "\n";
outfile << " " << std::fixed << std::setprecision(1) << time;
tot = 0;
for (int i=0; i<5; i++) {
tot = tot + offload<int, int>(output, starter, stopper, 0, 0, size);
}
time = ((double)tot)/5;
std::cout << "Time taken for final Hillis Steele algo = " << std::fixed << std::setprecision(1) << time << "\n";
outfile << " " << std::fixed << std::setprecision(1) << time << "\n";
}
return 0;
}
|
e3833d86b977975f5cab00f24e5d549864ed74fb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kern_CalcGradStep.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sinkBuffer = NULL;
// the buffers hold XSIZE*YSIZE floats, so size the allocations in bytes
hipMalloc(&sinkBuffer, XSIZE*YSIZE*sizeof(float));
float *incBuffer = NULL;
hipMalloc(&incBuffer, XSIZE*YSIZE*sizeof(float));
float *divBuffer = NULL;
hipMalloc(&divBuffer, XSIZE*YSIZE*sizeof(float));
float *labelBuffer = NULL;
hipMalloc(&labelBuffer, XSIZE*YSIZE*sizeof(float));
float stepSize = XSIZE*YSIZE;
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kern_CalcGradStep), dim3(gridBlock), dim3(threadBlock), 0, 0, sinkBuffer, incBuffer, divBuffer, labelBuffer, stepSize, iCC, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kern_CalcGradStep), dim3(gridBlock), dim3(threadBlock), 0, 0, sinkBuffer, incBuffer, divBuffer, labelBuffer, stepSize, iCC, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kern_CalcGradStep), dim3(gridBlock), dim3(threadBlock), 0, 0, sinkBuffer, incBuffer, divBuffer, labelBuffer, stepSize, iCC, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e3833d86b977975f5cab00f24e5d549864ed74fb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kern_CalcGradStep.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sinkBuffer = NULL;
// the buffers hold XSIZE*YSIZE floats, so size the allocations in bytes
cudaMalloc(&sinkBuffer, XSIZE*YSIZE*sizeof(float));
float *incBuffer = NULL;
cudaMalloc(&incBuffer, XSIZE*YSIZE*sizeof(float));
float *divBuffer = NULL;
cudaMalloc(&divBuffer, XSIZE*YSIZE*sizeof(float));
float *labelBuffer = NULL;
cudaMalloc(&labelBuffer, XSIZE*YSIZE*sizeof(float));
float stepSize = XSIZE*YSIZE;
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kern_CalcGradStep<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,stepSize,iCC,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kern_CalcGradStep<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,stepSize,iCC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kern_CalcGradStep<<<gridBlock,threadBlock>>>(sinkBuffer,incBuffer,divBuffer,labelBuffer,stepSize,iCC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d87a9e4d103ba8e0b80927ae6bf2e3b59be6844c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_fp16.h>
#define ELEMENT_SIZE 64
#define BLOCK_SIZE 64
#define WEIGHT_MAX_LENGTH 2048
extern "C"
//use constant memory for weights if needs to be faster
__global__ void weighted_sum_kernel(__half *ret,
const long *input,
const __half *weights,
const int ret0, const int ret1,
const int input0, const int input1, const int input2
) {
__shared__ __half weight_cache[BLOCK_SIZE];
__shared__ long cache[BLOCK_SIZE][BLOCK_SIZE / ELEMENT_SIZE];
const int z = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int tid = threadIdx.x;
const int ratio = BLOCK_SIZE / ELEMENT_SIZE;
const int ratio_sq = BLOCK_SIZE / ratio;
const int x_cache = tid / ratio;
const int z_cache = tid - x_cache * ratio;
const int z_offset = blockIdx.x * ratio;
const int z_element = tid / ELEMENT_SIZE;
const int z_bit = tid - z_element * ELEMENT_SIZE;
float tmp = 0; // half precision performance crippled on Pascal?
// __half tmp = 0;
for (int x_offset = 0; x_offset < input0; x_offset += BLOCK_SIZE) {
for (int x = 0; x < ratio; x++) {
const int x_block = x * ratio_sq + x_cache;
cache[x_block][z_cache] = x_offset + x_block < input0 && z_cache + z_offset < input2 ?
input[((x_offset + x_block) * input1 + y) * input2 + z_cache + z_offset] : 0;
}
weight_cache[tid] = weights[tid + x_offset];
__syncthreads();
#pragma unroll
for (int x = 0; x < BLOCK_SIZE; x++) {
if ((cache[x][z_element] >> z_bit) & 1) {
tmp += (float) weight_cache[x];
// tmp += weight_cache[x];
}
}
__syncthreads();
}
if (z<ret1) {
ret[y * ret1 + z] = (__half) tmp;
}
} | d87a9e4d103ba8e0b80927ae6bf2e3b59be6844c.cu | #include <cuda_fp16.h>
#define ELEMENT_SIZE 64
#define BLOCK_SIZE 64
#define WEIGHT_MAX_LENGTH 2048
extern "C"
//use constant memory for weights if needs to be faster
__global__ void weighted_sum_kernel(__half *ret,
const long *input,
const __half *weights,
const int ret0, const int ret1,
const int input0, const int input1, const int input2
) {
__shared__ __half weight_cache[BLOCK_SIZE];
__shared__ long cache[BLOCK_SIZE][BLOCK_SIZE / ELEMENT_SIZE];
const int z = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y;
const int tid = threadIdx.x;
const int ratio = BLOCK_SIZE / ELEMENT_SIZE;
const int ratio_sq = BLOCK_SIZE / ratio;
const int x_cache = tid / ratio;
const int z_cache = tid - x_cache * ratio;
const int z_offset = blockIdx.x * ratio;
const int z_element = tid / ELEMENT_SIZE;
const int z_bit = tid - z_element * ELEMENT_SIZE;
float tmp = 0; // half precision performance crippled on Pascal?
// __half tmp = 0;
for (int x_offset = 0; x_offset < input0; x_offset += BLOCK_SIZE) {
for (int x = 0; x < ratio; x++) {
const int x_block = x * ratio_sq + x_cache;
cache[x_block][z_cache] = x_offset + x_block < input0 && z_cache + z_offset < input2 ?
input[((x_offset + x_block) * input1 + y) * input2 + z_cache + z_offset] : 0;
}
weight_cache[tid] = weights[tid + x_offset];
__syncthreads();
#pragma unroll
for (int x = 0; x < BLOCK_SIZE; x++) {
if ((cache[x][z_element] >> z_bit) & 1) {
tmp += (float) weight_cache[x];
// tmp += weight_cache[x];
}
}
__syncthreads();
}
if (z<ret1) {
ret[y * ret1 + z] = (__half) tmp;
}
} |
9237b1fc005f6d317787b9136fd81693a13941a2.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of
meta data is different for every data types. CUTLASS templates can automatically infer it based on
input A and B. Check code below.
Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers
efficiently.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = cutlass::layout::RowMajor;
using ReorderedLayoutInputE = typename Gemm::LayoutE;
// Blow property is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(2),
ElementInputA(-2),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(2),
ElementInputB(-2),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(2),
ElementOutput(-2),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
tensor_e_reordered.device_ref(), // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
// Copy output data from CUTLASS host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
| 9237b1fc005f6d317787b9136fd81693a13941a2.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check example 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels needs to takes an additional E matrix which stores the meta data. The format of
meta data is different for every data types. CUTLASS templates can automatically infer it based on
input A and B. Check code below.
Moreover, matrix E needs to be preprocessed so that it can use ldmatrix to load into the registers
efficiently.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = cutlass::layout::RowMajor;
using ReorderedLayoutInputE = typename Gemm::LayoutE;
// Blow property is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
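// Illustrative sizes for the 512 x 512 x 1024 problem used in run() below (a sketch derived
// from the comments above, not additional configuration): with 50% sparsity kSparse == 2, so A
// is stored compressed as M x (K / kSparse) = 512 x 512 and the metadata E as
// M x (K / kSparse / kElementsPerElementE).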
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, ReorderedLayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(2),
ElementInputA(-2),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(2),
ElementInputB(-2),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(2),
ElementOutput(-2),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
tensor_e_reordered.device_ref(), // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
// Copy output data from CUTLASS host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
return run();
}
|
d18b0d786ebcbed3d47690dfc47f40782b9bc1f0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2020 Aiman bin Murhiz, Timotius Pujianto, James Schubach. All rights reserved.
* Bachelor of Computer Science Final project, Semester 1 2020, Monash University, Australia.
* Please contact one of the developers to have permission to use this software.
* Any kind of use, reproduction, distribution of this software without our permission
* is against the law.
*/
/*
* Project: "GPU Acceleration of Raster Filters."
*
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
// cuda kernel
#include "GradientOperator.cuh"
#include "ClampToRange.cuh"
#include "BlurRow.cuh"
#include "TransposeGrid.cuh"
// std
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include <time.h>
#include <errno.h>
float ncols, nrows, xllcorner, yllcorner, cellsize, nodata_value;
float *LowPass1st, *LowPass2nd, *ClampToRange, *transGrid1, *transGrid2;
int numberOfBlocks, numberOfThreads=1024;
int readHeader(char* strFromFile, float valueHolder){
/*
* Read header function
	 * this function is intended to read one header entry and store its value
	 * in the matching variable.
	 * strFromFile = the header name
	 * valueHolder = the value from the header part
	 * returns -1 when the header name does not match any known field,
	 * which means the buffer has reached the grid values instead of the header
*/
if (strcmp(strFromFile,"ncols") == 0){
ncols = valueHolder;
}else if (strcmp(strFromFile, "nrows") == 0){
nrows = valueHolder;
}else if (strcmp(strFromFile, "xllcorner") == 0){
xllcorner = valueHolder;
}else if (strcmp(strFromFile, "yllcorner") == 0){
yllcorner = valueHolder;
}else if (strcmp(strFromFile, "cellsize") == 0){
cellsize = valueHolder;
}else if (strcmp(strFromFile, "nodata_value") == 0){
nodata_value = valueHolder;
return -1;
}else {
return -1;
}
return 0;
}
void LowPassOperator(float *input, float *output, float sigma, bool firstpass){
/*
* Low Pass Operator function
	 * this function applies a Gaussian blur to each row of the grid
*
* Taken from Eduard java, under the AbstractFrequencyOperator.java
*
* * For an introduction to the topic:
* https://fgiesen.wordpress.com/2012/07/30/fast-blurs-1/
* https://fgiesen.wordpress.com/2012/08/01/fast-blurs-2/
*
* Paper comparing different blur algorithms with reference code:
*
* Getreuer 2013 A Survey of Gaussian Convolution Algorithms
* http://dev.ipol.im/~getreuer/code/doc/gaussian_20131215_doc/index.html
*
* Original paper introducing extended box filters:
*
* P. Gwosdek, S. Grewenig, A. Bruhn, J. Weickert, Theoretical foundations of
* Gaussian convolution by extended box filtering, International Conference on
	 * Scale Space and Variational Methods in Computer Vision, pp. 447–458, 2011.
* http://dx.doi.org/10.1007/ 978-3-642-24785-9_38
*
* Input = the input grid
* Output = the result grid of the low pass operation
* sigma = the sigma blur coefficient
	 * firstpass = a flag indicating whether the input grid has been transposed or not
*/
float *dstRow, *tmpRow;
float alpha, c1,c2;
int r, ITERATIONS = 4, block, N, totalrow;
r = floor((0.5 * sqrt(((12.0*sigma*sigma)/ITERATIONS)+1.0)) - 0.5);
alpha = (2 * r + 1) * (r * (r + 1) - 3.0 * sigma * sigma / ITERATIONS) / (6.0 * (sigma * sigma / ITERATIONS - (r + 1) * (r + 1)));
c1 = (alpha / (2.0 * (alpha + r) + 1));
c2 = ((1.0 - alpha) / (2.0 * (alpha + r) + 1));
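	// Worked example of the radius formula above (sketch; assumes ITERATIONS = 4 as set here):
	// sigma = 6  gives r = floor(0.5*sqrt(12*36/4 + 1) - 0.5)  = 4,  i.e. a box window of 2*r+1 = 9 cells;
	// sigma = 20 gives r = floor(0.5*sqrt(12*400/4 + 1) - 0.5) = 16, i.e. a box window of 33 cells.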
hipMallocManaged(&dstRow, nrows*ncols*sizeof(float));
hipMallocManaged(&tmpRow, nrows*ncols*sizeof(float));
	// Set up the number of blocks used for the kernel launch.
	// firstpass means the grid has not been transposed yet,
	// so the column and row counts are not swapped.
if (firstpass){
block = (nrows/1024)+1;
N = ncols;
totalrow = nrows;
}else{
block = (ncols/1024)+1;
N = nrows;
totalrow = ncols;
}
	/* Call the blur kernel once per iteration.
	 * If the number of iterations is changed,
	 * the following code block must be changed accordingly.
*/
hipLaunchKernelGGL(( blurRow), dim3(block), dim3(numberOfThreads), 0, 0, input, tmpRow, N, totalrow, r, c1,c2);
hipDeviceSynchronize();
hipLaunchKernelGGL(( blurRow), dim3(block), dim3(numberOfThreads), 0, 0, tmpRow, dstRow, N, totalrow, r, c1,c2);
hipDeviceSynchronize();
hipLaunchKernelGGL(( blurRow), dim3(block), dim3(numberOfThreads), 0, 0, dstRow, tmpRow, N, totalrow, r, c1,c2);
hipDeviceSynchronize();
hipLaunchKernelGGL(( blurRow), dim3(block), dim3(numberOfThreads), 0, 0, tmpRow, output, N, totalrow, r, c1,c2);
hipDeviceSynchronize();
// Free the memory
hipFree(tmpRow);
hipFree(dstRow);
}
float toRadians(float deg){
/*
	 * To radians function, converts the given angle (in degrees) into radians
	 * using the 22/7 approximation of pi.
	 * Parameter 1 = the angle in degrees
	 * return = the angle in radians as a float
*/
return deg*22/7/180;
}
void writeOutput(float *transGrid2, int gridTotalSize){
/*
* Write output function to write the filtered grid into the asc file
* The asc file will be used in Eduard to convert it into a png file
*/
	// Open the output file (or create it if it has not been created before)
char* fileName = "out.asc";
FILE *file = fopen(fileName, "w+");
// Error handling if the program failed to create the file
if (file == NULL){
fprintf(stderr, "%s\n", strerror(errno));
return;
}else{
//header part
fprintf(file, "%s %d\n", "ncols", int(ncols));
fprintf(file, "%s %d\n", "nrows", int(nrows));
fprintf(file, "%s %.1f\n", "xllcorner", xllcorner);
fprintf(file, "%s %.1f\n", "yllcorner", yllcorner);
fprintf(file, "%s %.1f\n", "cellsize", cellsize);
fprintf(file, "%s %.1f\n", "nodata_value", nodata_value);
// Writing the grid value into the file.
for (int index =0; index<gridTotalSize; index++){
fprintf(file, "%.3f ", *(transGrid2+index));
}
}
fclose(file);
}
int main(int argc, char** argv){
/*
* Main function to run the masking filter in cuda.
*
*/
char strHeader[256];
float valueHolder, gridTotalSize;
float *inputGrid, *resultGradientOperator;
time_t start, end, subStart, subEnd;
double totalTime;
char* fileName;
FILE *f;
	if (argc>1){
fileName = argv[1];
}else{
return 1;
}
// Open the input grid file ***.asc
f = fopen(fileName, "r");
if (f==NULL){
fprintf(stderr, "%s\n", strerror(errno));
}else{
fscanf(f,"%s %f", strHeader, &valueHolder);
// Flag to indicate where the buffer is. 0 means it is still in the header part,
// -1 means it reached the grid value.
int flag = readHeader(strHeader, valueHolder);
		// A loop to read through the header of the input file
// flag = 0 means that it is still reading the header
while (flag == 0){
fscanf(f,"%s %f", strHeader, &valueHolder);
// Make the string of the header all lower case.
for (int i = 0; strHeader[i]; i++){
strHeader[i] = tolower(strHeader[i]);
}
// update the flag
flag = readHeader(strHeader, valueHolder);
}
// Check if the grid header is valid or not
if ((ncols<0 || nrows < 0 || cellsize < 0) == 1){
// return error here
return 1;
}
gridTotalSize = ncols * nrows;
hipMallocManaged(&inputGrid, gridTotalSize * sizeof(float));
hipMallocManaged(&resultGradientOperator, gridTotalSize * sizeof(float));
// Scan the grid values and put them in the buffer called inputGrid
for (int i = 0; i < int(gridTotalSize); i++){
fscanf(f,"%f", &valueHolder);
*(inputGrid+i) = valueHolder;
}
fclose(f);
if (int(gridTotalSize)%numberOfThreads == 0){
numberOfBlocks = gridTotalSize / numberOfThreads;
}else{
numberOfBlocks = (gridTotalSize / numberOfThreads) + 1;
}
// compute grid with dimensionless rise/run slope values instead of slope in
// degrees, which would require an expensive atan() operation for each
// cell. Results with rise/run are almost identical to results with degrees.
start = clock();
subStart= clock();
hipLaunchKernelGGL(( gradientOperator), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, ncols, nrows,cellsize, gridTotalSize, inputGrid, resultGradientOperator);
hipDeviceSynchronize();
hipFree(inputGrid);
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The gradient operator time : %lf\n", totalTime);
// Allocate shared memory between host and device (gpu)
hipMallocManaged(&LowPass1st, gridTotalSize*sizeof(float));
hipMallocManaged(&LowPass2nd, gridTotalSize*sizeof(float));
hipMallocManaged(&transGrid1, gridTotalSize*sizeof(float));
hipMallocManaged(&transGrid2, gridTotalSize*sizeof(float));
// Blur row to smooth the sharp edge via lowpassoperator
float sigma = 6.;
subStart = clock();
LowPassOperator(resultGradientOperator, LowPass1st, sigma, true);
hipLaunchKernelGGL(( transposeGrid), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, LowPass1st, transGrid1, ncols,nrows);
hipDeviceSynchronize();
LowPassOperator(transGrid1, LowPass2nd, sigma, false);
hipLaunchKernelGGL(( transposeGrid), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, LowPass2nd, transGrid2, nrows,ncols);
hipDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The first low pass operator time : %lf\n", totalTime);
// Clamp slope values to range between gainSlopeThreshold and slopeThreshold
float relativeGain =0.5, slopeThresholdDeg = 6.;
float slopeThreshold = tan(toRadians(slopeThresholdDeg));
float gainSlopeThresholdDeg = slopeThreshold * fmin(0.995, relativeGain);
float gainSlopeThreshold = tan(toRadians(gainSlopeThresholdDeg));
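	// For reference (illustrative only): with slopeThresholdDeg = 6 the upper threshold is
	// slopeThreshold = tan(6 deg) ~= 0.105 rise/run, and gainSlopeThreshold is a smaller
	// rise/run value; together they bound the interval used by clampToRange and the mask scale below.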
subStart = clock();
hipLaunchKernelGGL(( clampToRange), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, transGrid2, gainSlopeThreshold, slopeThreshold, gridTotalSize);
hipDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The clamp to range operator time : %lf\n", totalTime);
// Blur the sharp edges once more via lowpassoperator
sigma = 20.;
subStart = clock();
LowPassOperator(transGrid2, LowPass1st, sigma, true);
hipLaunchKernelGGL(( transposeGrid), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, LowPass1st, transGrid1, ncols,nrows);
hipDeviceSynchronize();
LowPassOperator(transGrid1, LowPass2nd, sigma, false);
hipLaunchKernelGGL(( transposeGrid), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, LowPass2nd, transGrid2, nrows,ncols);
hipDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The second low pass operator time : %lf\n", totalTime);
// Mask Filter
float scale = 1/(slopeThreshold-gainSlopeThreshold);
subStart = clock();
hipLaunchKernelGGL(( maskFilter), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0, transGrid2, gridTotalSize, gainSlopeThreshold, scale);
hipDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The gradient operator time : %lf\n", totalTime);
end = clock();
totalTime = (double) (end-start)/ CLOCKS_PER_SEC;
printf("The total filter time : %lf\n", totalTime);
writeOutput(transGrid2, gridTotalSize);
}
return 0;
} | d18b0d786ebcbed3d47690dfc47f40782b9bc1f0.cu | /*
* Copyright 2020 Aiman bin Murhiz, Timotius Pujianto, James Schubach. All rights reserved.
* Bachelor of Computer Science Final project, Semester 1 2020, Monash University, Australia.
* Please contact one of the developers to have permission to use this software.
* Any kind of use, reproduction, distribution of this software without our permission
* is against the law.
*/
/*
* Project: "GPU Acceleration of Raster Filters."
*
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// cuda kernel
#include "GradientOperator.cuh"
#include "ClampToRange.cuh"
#include "BlurRow.cuh"
#include "TransposeGrid.cuh"
// std
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include <time.h>
#include <errno.h>
float ncols, nrows, xllcorner, yllcorner, cellsize, nodata_value;
float *LowPass1st, *LowPass2nd, *ClampToRange, *transGrid1, *transGrid2;
int numberOfBlocks, numberOfThreads=1024;
int readHeader(char* strFromFile, float valueHolder){
/*
* Read header function
 * this function is intended to read one header entry and store its value
 * in the matching variable.
 * strFromFile = the header name
 * valueHolder = the value from the header part
 * returns -1 when the header name does not match any known field,
 * which means the buffer has reached the grid values instead of the header
*/
if (strcmp(strFromFile,"ncols") == 0){
ncols = valueHolder;
}else if (strcmp(strFromFile, "nrows") == 0){
nrows = valueHolder;
}else if (strcmp(strFromFile, "xllcorner") == 0){
xllcorner = valueHolder;
}else if (strcmp(strFromFile, "yllcorner") == 0){
yllcorner = valueHolder;
}else if (strcmp(strFromFile, "cellsize") == 0){
cellsize = valueHolder;
}else if (strcmp(strFromFile, "nodata_value") == 0){
nodata_value = valueHolder;
return -1;
}else {
return -1;
}
return 0;
}
void LowPassOperator(float *input, float *output, float sigma, bool firstpass){
/*
* Low Pass Operator function
 * this function applies a Gaussian blur to each row of the grid
*
* Taken from Eduard java, under the AbstractFrequencyOperator.java
*
* * For an introduction to the topic:
* https://fgiesen.wordpress.com/2012/07/30/fast-blurs-1/
* https://fgiesen.wordpress.com/2012/08/01/fast-blurs-2/
*
* Paper comparing different blur algorithms with reference code:
*
* Getreuer 2013 A Survey of Gaussian Convolution Algorithms
* http://dev.ipol.im/~getreuer/code/doc/gaussian_20131215_doc/index.html
*
* Original paper introducing extended box filters:
*
* P. Gwosdek, S. Grewenig, A. Bruhn, J. Weickert, “Theoretical foundations of
* Gaussian convolution by extended box filtering,” International Conference on
* Scale Space and Variational Methods in Computer Vision, pp. 447–458, 2011.
* http://dx.doi.org/10.1007/ 978-3-642-24785-9_38
*
* Input = the input grid
* Output = the result grid of the low pass operation
* sigma = the sigma blur coefficient
 * firstpass = a flag indicating whether the input grid has been transposed or not
*/
float *dstRow, *tmpRow;
float alpha, c1,c2;
int r, ITERATIONS = 4, block, N, totalrow;
r = floor((0.5 * sqrt(((12.0*sigma*sigma)/ITERATIONS)+1.0)) - 0.5);
alpha = (2 * r + 1) * (r * (r + 1) - 3.0 * sigma * sigma / ITERATIONS) / (6.0 * (sigma * sigma / ITERATIONS - (r + 1) * (r + 1)));
c1 = (alpha / (2.0 * (alpha + r) + 1));
c2 = ((1.0 - alpha) / (2.0 * (alpha + r) + 1));
cudaMallocManaged(&dstRow, nrows*ncols*sizeof(float));
cudaMallocManaged(&tmpRow, nrows*ncols*sizeof(float));
	// Set up the number of blocks used for the kernel launch.
	// firstpass means the grid has not been transposed yet,
	// so the column and row counts are not swapped.
if (firstpass){
block = (nrows/1024)+1;
N = ncols;
totalrow = nrows;
}else{
block = (ncols/1024)+1;
N = nrows;
totalrow = ncols;
}
	/* Call the blur kernel once per iteration.
	 * If the number of iterations is changed,
	 * the following code block must be changed accordingly.
*/
blurRow<<<block, numberOfThreads>>>(input, tmpRow, N, totalrow, r, c1,c2);
cudaDeviceSynchronize();
blurRow<<<block, numberOfThreads>>>(tmpRow, dstRow, N, totalrow, r, c1,c2);
cudaDeviceSynchronize();
blurRow<<<block, numberOfThreads>>>(dstRow, tmpRow, N, totalrow, r, c1,c2);
cudaDeviceSynchronize();
blurRow<<<block, numberOfThreads>>>(tmpRow, output, N, totalrow, r, c1,c2);
cudaDeviceSynchronize();
// Free the memory
cudaFree(tmpRow);
cudaFree(dstRow);
}
float toRadians(float deg){
/*
 * To radians function, converts the given angle (in degrees) into radians
 * using the 22/7 approximation of pi.
 * Parameter 1 = the angle in degrees
 * return = the angle in radians as a float
*/
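// Note: 22/7 (~3.1429) is used below as an approximation of PI.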
return deg*22/7/180;
}
void writeOutput(float *transGrid2, int gridTotalSize){
/*
* Write output function to write the filtered grid into the asc file
* The asc file will be used in Eduard to convert it into a png file
*/
// Open the output file (or create it if it has not been created before)
const char* fileName = "out.asc";
FILE *file = fopen(fileName, "w+");
// Error handling if the program failed to create the file
if (file == NULL){
fprintf(stderr, "%s\n", strerror(errno));
return;
}else{
//header part
fprintf(file, "%s %d\n", "ncols", int(ncols));
fprintf(file, "%s %d\n", "nrows", int(nrows));
fprintf(file, "%s %.1f\n", "xllcorner", xllcorner);
fprintf(file, "%s %.1f\n", "yllcorner", yllcorner);
fprintf(file, "%s %.1f\n", "cellsize", cellsize);
fprintf(file, "%s %.1f\n", "nodata_value", nodata_value);
// Writing the grid value into the file.
for (int index =0; index<gridTotalSize; index++){
fprintf(file, "%.3f ", *(transGrid2+index));
}
}
fclose(file);
}
int main(int argc, char** argv){
/*
* Main function to run the masking filter in cuda.
*
*/
char strHeader[256];
float valueHolder, gridTotalSize;
float *inputGrid, *resultGradientOperator;
time_t start, end, subStart, subEnd;
double totalTime;
char* fileName;
FILE *f;
// argv[1] must hold the input file name, so require at least two arguments
if (argc > 1){
fileName = argv[1];
}else{
return 1;
}
// Open the input grid file ***.asc
f = fopen(fileName, "r");
if (f==NULL){
fprintf(stderr, "%s\n", strerror(errno));
}else{
fscanf(f,"%s %f", strHeader, &valueHolder);
// Flag to indicate where the buffer is. 0 means it is still in the header part,
// -1 means it has reached the grid values.
int flag = readHeader(strHeader, valueHolder);
// A loop to read through the header of the input file
// flag = 0 means that it is still reading the header
while (flag == 0){
fscanf(f,"%s %f", strHeader, &valueHolder);
// Make the string of the header all lower case.
for (int i = 0; strHeader[i]; i++){
strHeader[i] = tolower(strHeader[i]);
}
// update the flag
flag = readHeader(strHeader, valueHolder);
}
// Check if the grid header is valid or not
if (ncols < 0 || nrows < 0 || cellsize < 0){
// return error here
return 1;
}
gridTotalSize = ncols * nrows;
cudaMallocManaged(&inputGrid, gridTotalSize * sizeof(float));
cudaMallocManaged(&resultGradientOperator, gridTotalSize * sizeof(float));
// Scan the grid values and put them in the buffer called inputGrid
for (int i = 0; i < int(gridTotalSize); i++){
fscanf(f,"%f", &valueHolder);
*(inputGrid+i) = valueHolder;
}
fclose(f);
if (int(gridTotalSize)%numberOfThreads == 0){
numberOfBlocks = gridTotalSize / numberOfThreads;
}else{
numberOfBlocks = (gridTotalSize / numberOfThreads) + 1;
}
// compute grid with dimensionless rise/run slope values instead of slope in
// degrees, which would require an expensive atan() operation for each
// cell. Results with rise/run are almost identical to results with degrees.
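// For example, the slopeThresholdDeg of 6 degrees used below corresponds to a
// rise/run value of tan(6 * PI / 180) ~= 0.105.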
start = clock();
subStart= clock();
gradientOperator<<<numberOfBlocks,numberOfThreads>>>(ncols, nrows,cellsize, gridTotalSize, inputGrid, resultGradientOperator);
cudaDeviceSynchronize();
cudaFree(inputGrid);
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The gradient operator time : %lf\n", totalTime);
// Allocate shared memory between host and device (gpu)
cudaMallocManaged(&LowPass1st, gridTotalSize*sizeof(float));
cudaMallocManaged(&LowPass2nd, gridTotalSize*sizeof(float));
cudaMallocManaged(&transGrid1, gridTotalSize*sizeof(float));
cudaMallocManaged(&transGrid2, gridTotalSize*sizeof(float));
// Blur row to smooth the sharp edge via lowpassoperator
float sigma = 6.;
subStart = clock();
LowPassOperator(resultGradientOperator, LowPass1st, sigma, true);
transposeGrid<<<numberOfBlocks,numberOfThreads>>>(LowPass1st, transGrid1, ncols,nrows);
cudaDeviceSynchronize();
LowPassOperator(transGrid1, LowPass2nd, sigma, false);
transposeGrid<<<numberOfBlocks,numberOfThreads>>>(LowPass2nd, transGrid2, nrows,ncols);
cudaDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The first low pass operator time : %lf\n", totalTime);
// Clamp slope values to range between gainSlopeThreshold and slopeThreshold
float relativeGain =0.5, slopeThresholdDeg = 6.;
float slopeThreshold = tan(toRadians(slopeThresholdDeg));
float gainSlopeThresholdDeg = slopeThreshold * fmin(0.995, relativeGain);
float gainSlopeThreshold = tan(toRadians(gainSlopeThresholdDeg));
subStart = clock();
clampToRange<<<numberOfBlocks,numberOfThreads>>>(transGrid2, gainSlopeThreshold, slopeThreshold, gridTotalSize);
cudaDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The clamp to range operator time : %lf\n", totalTime);
// Blur the sharp edges once more via lowpassoperator
sigma = 20.;
subStart = clock();
LowPassOperator(transGrid2, LowPass1st, sigma, true);
transposeGrid<<<numberOfBlocks,numberOfThreads>>>(LowPass1st, transGrid1, ncols,nrows);
cudaDeviceSynchronize();
LowPassOperator(transGrid1, LowPass2nd, sigma, false);
transposeGrid<<<numberOfBlocks,numberOfThreads>>>(LowPass2nd, transGrid2, nrows,ncols);
cudaDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The second low pass operator time : %lf\n", totalTime);
// Mask Filter
float scale = 1/(slopeThreshold-gainSlopeThreshold);
subStart = clock();
maskFilter<<<numberOfBlocks,numberOfThreads>>>(transGrid2, gridTotalSize, gainSlopeThreshold, scale);
cudaDeviceSynchronize();
subEnd = clock();
totalTime = (double) (subEnd-subStart)/ CLOCKS_PER_SEC;
printf("The mask filter time : %lf\n", totalTime);
end = clock();
totalTime = (double) (end-start)/ CLOCKS_PER_SEC;
printf("The total filter time : %lf\n", totalTime);
writeOutput(transGrid2, gridTotalSize);
}
return 0;
} |
e54efc45363eddbf54ae82ffcfe49b1ce0cc5e92.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
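// iXSIZE and iYSIZE are now rounded up to the next multiples of BLOCKX and BLOCKY,
// so the gridBlock/threadBlock launch below covers at least XSIZE x YSIZE threads.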
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matMult), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e54efc45363eddbf54ae82ffcfe49b1ce0cc5e92.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matMult.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
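// iXSIZE and iYSIZE are now rounded up to the next multiples of BLOCKX and BLOCKY,
// so the gridBlock/threadBlock launch below covers at least XSIZE x YSIZE threads.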
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
matMult<<<gridBlock,threadBlock>>>(A,B,C);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
matMult<<<gridBlock,threadBlock>>>(A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
matMult<<<gridBlock,threadBlock>>>(A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6cc1446e99ef4abb562fdda2f964ff2138aa3c8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#define min(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)<(b))?(b):(a))
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
//__shared__ float s[8][8];
int row=threadIdx.y + blockIdx.y*blockDim.y;
int col=threadIdx.x + blockIdx.x*blockDim.x;
//s[threadIdx.y][threadIdx.x]=static_cast<float>(inputChannel[row*numCols+col]);
//__syncthreads();
if ( col >= numCols || row >= numRows )
{
return;
}
int index=row*numCols+col;
float result=0.f;
for(int filter_r=-filterWidth/2;filter_r<=filterWidth/2;++filter_r)
for(int filter_c=-filterWidth/2;filter_c<=filterWidth/2;++filter_c)
{
//clamping
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
// float image_val = s[image_r][image_c];
float image_val = static_cast<float>(inputChannel[image_r*numCols+image_c]);
float filter_val=filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_val*filter_val;
}
outputChannel[index]=result;
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
int row=threadIdx.y + blockIdx.y*blockDim.y;
int col=threadIdx.x + blockIdx.x*blockDim.x;
if ( col >= numCols || row >= numRows )
{
return;
}
int index=row*numCols+col;
uchar4 rgba=inputImageRGBA[index];
redChannel[index]=rgba.x;
greenChannel[index]=rgba.y;
blueChannel[index]=rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
assert(filterWidth%2==1);
std::cout << "filterWidth= " << filterWidth << std::endl;
//const dim3 blockSize(filterWidth,filterWidth);
const dim3 blockSize(8,8);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
// Round up so the grid covers every pixel even when the image dimensions
// are not exact multiples of the block size.
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA,
numRows,numCols,
d_red,
d_green,
d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_filter));
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 6cc1446e99ef4abb562fdda2f964ff2138aa3c8b.cu | #include <cuda.h>
#include <iostream>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#define min(a,b) (((a)<(b))?(a):(b))
#define max(a,b) (((a)<(b))?(b):(a))
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
//__shared__ float s[8][8];
int row=threadIdx.y + blockIdx.y*blockDim.y;
int col=threadIdx.x + blockIdx.x*blockDim.x;
//s[threadIdx.y][threadIdx.x]=static_cast<float>(inputChannel[row*numCols+col]);
//__syncthreads();
if ( col >= numCols || row >= numRows )
{
return;
}
int index=row*numCols+col;
float result=0.f;
for(int filter_r=-filterWidth/2;filter_r<=filterWidth/2;++filter_r)
for(int filter_c=-filterWidth/2;filter_c<=filterWidth/2;++filter_c)
{
//clamping
int image_r = min(max(row + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(col + filter_c, 0), static_cast<int>(numCols - 1));
// float image_val = s[image_r][image_c];
float image_val = static_cast<float>(inputChannel[image_r*numCols+image_c]);
float filter_val=filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_val*filter_val;
}
outputChannel[index]=result;
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute position 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
int row=threadIdx.y + blockIdx.y*blockDim.y;
int col=threadIdx.x + blockIdx.x*blockDim.x;
if ( col >= numCols || row >= numRows )
{
return;
}
int index=row*numCols+col;
uchar4 rgba=inputImageRGBA[index];
redChannel[index]=rgba.x;
greenChannel[index]=rgba.y;
blueChannel[index]=rgba.z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
assert(filterWidth%2==1);
std::cout << "filterWidth= " << filterWidth << std::endl;
//const dim3 blockSize(filterWidth,filterWidth);
const dim3 blockSize(8,8);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
// Round up so the grid covers every pixel even when the image dimensions
// are not exact multiples of the block size.
const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
(numRows + blockSize.y - 1) / blockSize.y);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA,
numRows,numCols,
d_red,
d_green,
d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize,blockSize>>>(d_red,
d_redBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize,blockSize>>>(d_green,
d_greenBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize,blockSize>>>(d_blue,
d_blueBlurred,
numRows, numCols,
d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_filter));
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
cf8268542ff2ed036d21caf5aa3ee73d28be5b3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_erfcinvf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
vec_erfcinvf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_erfcinvf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_erfcinvf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | cf8268542ff2ed036d21caf5aa3ee73d28be5b3e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_erfcinvf.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_erfcinvf<<<gridBlock,threadBlock>>>(n,result,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_erfcinvf<<<gridBlock,threadBlock>>>(n,result,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_erfcinvf<<<gridBlock,threadBlock>>>(n,result,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
70eb4d4c60f2514d85401fc99c78b303ddf80614.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 2048 * 2048 // Number of elements in each vector
__global__ void saxpy(float scalar, float * x, float * y)
{
// Determine our unique global thread ID, so we know which element to process
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < N ) // Make sure we don't do more work than we have data!
y[tid] = scalar * x[tid] + y[tid];
}
int main()
{
float *x, *y;
int size = N * sizeof (float); // The total number of bytes per vector
hipError_t ierrAsync;
hipError_t ierrSync;
// Allocate memory
hipMallocManaged(&x, size);
hipMallocManaged(&y, size);
// Initialize memory
for( int i = 0; i < N; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
int threads_per_block = 256;
int number_of_blocks = (N / threads_per_block) + 1;
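// N (2048*2048) is an exact multiple of 256, so the +1 simply adds one extra block
// whose threads all fail the tid < N guard in the kernel.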
hipLaunchKernelGGL(( saxpy) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, 2.0f, x, y );
ierrSync = hipGetLastError();
ierrAsync = hipDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != hipSuccess) { printf("Sync error: %s\n", hipGetErrorString(ierrSync)); }
if (ierrAsync != hipSuccess) { printf("Async error: %s\n", hipGetErrorString(ierrAsync)); }
// Print out our Max Error
float maxError = 0;
for( int i = 0; i < N; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
// Free all our allocated memory
hipFree( x ); hipFree( y );
} | 70eb4d4c60f2514d85401fc99c78b303ddf80614.cu | #include <stdio.h>
#define N 2048 * 2048 // Number of elements in each vector
__global__ void saxpy(float scalar, float * x, float * y)
{
// Determine our unique global thread ID, so we know which element to process
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < N ) // Make sure we don't do more work than we have data!
y[tid] = scalar * x[tid] + y[tid];
}
int main()
{
float *x, *y;
int size = N * sizeof (float); // The total number of bytes per vector
cudaError_t ierrAsync;
cudaError_t ierrSync;
// Allocate memory
cudaMallocManaged(&x, size);
cudaMallocManaged(&y, size);
// Initialize memory
for( int i = 0; i < N; ++i )
{
x[i] = 1.0f;
y[i] = 2.0f;
}
int threads_per_block = 256;
int number_of_blocks = (N / threads_per_block) + 1;
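// N (2048*2048) is an exact multiple of 256, so the +1 simply adds one extra block
// whose threads all fail the tid < N guard in the kernel.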
saxpy <<< number_of_blocks, threads_per_block >>> ( 2.0f, x, y );
ierrSync = cudaGetLastError();
ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != cudaSuccess) { printf("Sync error: %s\n", cudaGetErrorString(ierrSync)); }
if (ierrAsync != cudaSuccess) { printf("Async error: %s\n", cudaGetErrorString(ierrAsync)); }
// Print out our Max Error
float maxError = 0;
for( int i = 0; i < N; ++i )
if (abs(4-y[i]) > maxError) { maxError = abs(4-y[i]); }
printf("Max Error: %.5f", maxError);
// Free all our allocated memory
cudaFree( x ); cudaFree( y );
} |
50cdcb2ee5d8280088f74d08bc7ffd4b3ba6693a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/torch.h>
__global__ void cudakernel(int* buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = buf[i]+1;
}
int main() {
auto a = torch::zeros({16, 16}, at::device(at::kCUDA).dtype(at::kInt));
std::cout << a << "\n";
hipLaunchKernelGGL(( cudakernel), dim3(16), dim3(16), 0, 0, (int*)a.data_ptr());
std::cout << a << "\n";
} | 50cdcb2ee5d8280088f74d08bc7ffd4b3ba6693a.cu | #include <torch/torch.h>
__global__ void cudakernel(int* buf)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
buf[i] = buf[i]+1;
}
int main() {
auto a = torch::zeros({16, 16}, at::device(at::kCUDA).dtype(at::kInt));
std::cout << a << "\n";
cudakernel<<<16, 16>>>((int*)a.data_ptr());
std::cout << a << "\n";
} |
54daced4e144d91e55fdcb1c2e368ef06edb9d1a.hip | // !!! This is a file automatically generated by hipify!!!
//fail
//--blockDim=1024 --gridDim=1024 --no-inline
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math_functions.h>
#define DIM 2 //1024 in the future
#define N 2//DIM*DIM
__global__ void mul24_test (int* A, int* B)
{
int idxa = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int idxb = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
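// __mul24/__umul24 multiply only the low 24 bits of their operands (a fast path on
// early CUDA hardware); for the small indices used here they match a full multiply.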
A[idxa] = idxa;
B[idxb] = idxa;
}
int main (){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 1;
for (int i = 0; i < N; i++)
b[i] = 1;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mul24_test), dim3(DIM),dim3(DIM), 0, 0, dev_a,dev_b);
//ESBMC_verify_kernel(mul24_test,1,N,dev_a,dev_b);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
hipMemcpy(b,dev_b,size,hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
assert (a[i] != i);
for (int i = 0; i < N; i++)
assert (b[i] != i);
free(a); free(b);
hipFree(dev_a);
hipFree(dev_b);
return 0;
}
| 54daced4e144d91e55fdcb1c2e368ef06edb9d1a.cu | //fail
//--blockDim=1024 --gridDim=1024 --no-inline
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math_functions.h>
#define DIM 2 //1024 in the future
#define N 2//DIM*DIM
__global__ void mul24_test (int* A, int* B)
{
int idxa = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
unsigned int idxb = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
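// __mul24/__umul24 multiply only the low 24 bits of their operands (a fast path on
// early CUDA hardware); for the small indices used here they match a full multiply.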
A[idxa] = idxa;
B[idxb] = idxa;
}
int main (){
int *a, *b;
int *dev_a, *dev_b;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
a = (int*)malloc(size);
b = (int*)malloc(size);
for (int i = 0; i < N; i++)
a[i] = 1;
for (int i = 0; i < N; i++)
b[i] = 1;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size, cudaMemcpyHostToDevice);
mul24_test<<<DIM,DIM>>>(dev_a,dev_b);
//ESBMC_verify_kernel(mul24_test,1,N,dev_a,dev_b);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
cudaMemcpy(b,dev_b,size,cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
assert (a[i] != i);
for (int i = 0; i < N; i++)
assert (b[i] != i);
free(a); free(b);
cudaFree(dev_a);
cudaFree(dev_b);
return 0;
}
|
c45d800c50cc7d8b18a42a370a979aad278bc06a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cutil_inline.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <time.h>
// CUDA global constant variables
__constant__ int N;
__constant__ float a, b, c;
// Kernel Function
__global__ void Polynomial(float *d_z, float *d_v)
{
float y1, sum = 0.0f;
// version1
//d_z = d_z + threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
// version2
d_z = d_z + 2 * N * threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
d_v = d_v + threadIdx.x + blockIdx.x * blockDim.x;
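// Layout note: version 1 makes consecutive threads read consecutive elements of d_z
// on each loop step (coalesced), while version 2 (active here) gives every thread its
// own contiguous block of 2*N samples, so a warp's loads are strided by 2*N floats.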
for (int n = 0; n < N; n++)
{
y1 = (*d_z);
// version1
//d_z += blockDim.x;
// version2
d_z += 1;
sum += a * y1 * y1 + b * y1 + c;
}
*d_v = sum / N;
}
int main_poly(int argc, char* argv[]) {
int NPATH = 960000, h_N = 100;
float h_a, h_b, h_c;
float *h_v, *d_v, *d_z;
double sum1, sum2;
//double timer, elapsed;
clock_t timer; // for counting the CPU time
double elapsed; // elapsed time
hiprandGenerator_t gen;
// initialise card
cutilDeviceInit(argc, argv);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
cudaSafeCall(hipMalloc((void **)&d_v, sizeof(float)*NPATH));
cudaSafeCall(hipMalloc((void **)&d_z, sizeof(float)* 2 * h_N * NPATH));
// define constants and transfer to GPU
h_a = 1.0f;
h_b = 2.0f;
h_c = 0.0f;
cudaSafeCall(hipMemcpyToSymbol(N, &h_N, sizeof(h_N)));
cudaSafeCall(hipMemcpyToSymbol(a, &h_a, sizeof(h_a)));
cudaSafeCall(hipMemcpyToSymbol(b, &h_b, sizeof(h_b)));
cudaSafeCall(hipMemcpyToSymbol(c, &h_c, sizeof(h_c)));
// random number generation
timer = clock(); // initialise timer
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
hiprandGenerateNormal(gen, d_z, 2 * h_N * NPATH, 0.0f, 1.0f);
cudaSafeCall(hipDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("\nCURAND normal RNG execution time (ms): %f , samples/sec: %e \n",
elapsed, 2.0*h_N*NPATH / elapsed);
// execute kernel and time it
Polynomial << <NPATH / 64, 64 >> >(d_z, d_v);
cudaCheckMsg("pathcalc execution failed\n");
cudaSafeCall(hipDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("Polynomial kernel execution time (ms): %f \n", elapsed);
// copy back results
cudaSafeCall(hipMemcpy(h_v, d_v, sizeof(float)*NPATH,
hipMemcpyDeviceToHost));
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i = 0; i < NPATH; i++) {
sum1 += h_v[i];
//printf("%f\n", h_v[i]);
sum2 += h_v[i] * h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1 / NPATH, sqrt((sum2 / NPATH - (sum1 / NPATH)*(sum1 / NPATH)) / NPATH));
// Tidy up library
hiprandDestroyGenerator(gen);
// Release memory and exit cleanly
free(h_v);
cudaSafeCall(hipFree(d_v));
cudaSafeCall(hipFree(d_z));
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
system("pause");
return 0;
}
| c45d800c50cc7d8b18a42a370a979aad278bc06a.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "cutil_inline.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <curand.h>
#include <time.h>
// CUDA global constant variables
__constant__ int N;
__constant__ float a, b, c;
// Kernel Function
__global__ void Polynomial(float *d_z, float *d_v)
{
float y1, sum = 0.0f;
// version1
//d_z = d_z + threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
// version2
d_z = d_z + 2 * N * threadIdx.x + 2 * N * blockIdx.x * blockDim.x;
d_v = d_v + threadIdx.x + blockIdx.x * blockDim.x;
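// Layout note: version 1 makes consecutive threads read consecutive elements of d_z
// on each loop step (coalesced), while version 2 (active here) gives every thread its
// own contiguous block of 2*N samples, so a warp's loads are strided by 2*N floats.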
for (int n = 0; n < N; n++)
{
y1 = (*d_z);
// version1
//d_z += blockDim.x;
// version2
d_z += 1;
sum += a * y1 * y1 + b * y1 + c;
}
*d_v = sum / N;
}
int main_poly(int argc, char* argv[]) {
int NPATH = 960000, h_N = 100;
float h_a, h_b, h_c;
float *h_v, *d_v, *d_z;
double sum1, sum2;
//double timer, elapsed;
clock_t timer; // for counting the CPU time
double elapsed; // elapsed time
curandGenerator_t gen;
// initialise card
cutilDeviceInit(argc, argv);
// allocate memory on host and device
h_v = (float *)malloc(sizeof(float)*NPATH);
cudaSafeCall(cudaMalloc((void **)&d_v, sizeof(float)*NPATH));
cudaSafeCall(cudaMalloc((void **)&d_z, sizeof(float)* 2 * h_N * NPATH));
// define constants and transfer to GPU
h_a = 1.0f;
h_b = 2.0f;
h_c = 0.0f;
cudaSafeCall(cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)));
cudaSafeCall(cudaMemcpyToSymbol(a, &h_a, sizeof(h_a)));
cudaSafeCall(cudaMemcpyToSymbol(b, &h_b, sizeof(h_b)));
cudaSafeCall(cudaMemcpyToSymbol(c, &h_c, sizeof(h_c)));
// random number generation
timer = clock(); // initialise timer
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
curandGenerateNormal(gen, d_z, 2 * h_N * NPATH, 0.0f, 1.0f);
cudaSafeCall(cudaDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("\nCURAND normal RNG execution time (ms): %f , samples/sec: %e \n",
elapsed, 2.0*h_N*NPATH / elapsed);
// execute kernel and time it
Polynomial << <NPATH / 64, 64 >> >(d_z, d_v);
cudaCheckMsg("pathcalc execution failed\n");
cudaSafeCall(cudaDeviceSynchronize());
elapsed = elapsed_time(&timer);
printf("Polynomial kernel execution time (ms): %f \n", elapsed);
// copy back results
cudaSafeCall(cudaMemcpy(h_v, d_v, sizeof(float)*NPATH,
cudaMemcpyDeviceToHost));
// compute average
sum1 = 0.0;
sum2 = 0.0;
for (int i = 0; i < NPATH; i++) {
sum1 += h_v[i];
//printf("%f\n", h_v[i]);
sum2 += h_v[i] * h_v[i];
}
printf("\nAverage value and standard deviation of error = %13.8f %13.8f\n\n",
sum1 / NPATH, sqrt((sum2 / NPATH - (sum1 / NPATH)*(sum1 / NPATH)) / NPATH));
// Tidy up library
curandDestroyGenerator(gen);
// Release memory and exit cleanly
free(h_v);
cudaSafeCall(cudaFree(d_v));
cudaSafeCall(cudaFree(d_z));
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
system("pause");
return 0;
}
|
bb58806f974102b418d6ebacc027eda3929194e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void SMSV(float* M, float* V, float* R, int* maddr, int* addr, int N) {
int tid = threadIdx.x;
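// Judging from the indexing below, V and each column of M store N values followed by
// N indices, and *addr / maddr[vid] hold the address of the last populated value
// (the loops are inclusive). Note that psum is an int array, so the float products
// accumulated into it are truncated.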
if (tid < N) {
__shared__ int psum[LENGTH];
psum[tid] = 0; // initialize psum with 0s
__syncthreads(); // psum is finished being written to
int numCols = (int)(*addr - (intptr_t)&V[0])/4; // end of SST for vector
for (int i = 0; i <= numCols; i++) { // loop through columns
int vid = (int)V[i+N]; // vector index
int cEnd = (int)(maddr[vid] - (intptr_t)&M[2*N*vid])/4; // end of SST for column
if (tid <= cEnd) {
int mid = (int)M[2*N*vid + tid+N]; // matrix index
psum[mid] += M[2*N*vid + tid] * V[i];
}
__syncthreads();
}
R[tid] = psum[tid];
}
} | bb58806f974102b418d6ebacc027eda3929194e9.cu | #include "includes.h"
__global__ void SMSV(float* M, float* V, float* R, int* maddr, int* addr, int N) {
int tid = threadIdx.x;
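// Judging from the indexing below, V and each column of M store N values followed by
// N indices, and *addr / maddr[vid] hold the address of the last populated value
// (the loops are inclusive). Note that psum is an int array, so the float products
// accumulated into it are truncated.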
if (tid < N) {
__shared__ int psum[LENGTH];
psum[tid] = 0; // initialize psum with 0s
__syncthreads(); // psum is finished being written to
int numCols = (int)(*addr - (intptr_t)&V[0])/4; // end of SST for vector
for (int i = 0; i <= numCols; i++) { // loop through columns
int vid = (int)V[i+N]; // vector index
int cEnd = (int)(maddr[vid] - (intptr_t)&M[2*N*vid])/4; // end of SST for column
if (tid <= cEnd) {
int mid = (int)M[2*N*vid + tid+N]; // matrix index
psum[mid] += M[2*N*vid + tid] * V[i];
}
__syncthreads();
}
R[tid] = psum[tid];
}
} |
982534288c4158354236637f8db411ca4c9e2e33.hip | // !!! This is a file automatically generated by hipify!!!
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <xgboost/data.h>
#include <xgboost/c_api.h>
#include "test_hist_util.h"
#include "../helpers.h"
#include "../data/test_array_interface.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/common/math.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../include/xgboost/logging.h"
namespace xgboost {
namespace common {
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins);
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(hipGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
void TestCategoricalSketch(size_t n, size_t num_categories, int32_t num_bins, bool weighted) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
dmat->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
dmat->Info().weights_.HostVector() = weights;
}
ASSERT_EQ(dmat->Info().feature_types.Size(), 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
std::sort(x.begin(), x.end());
auto n_uniques = std::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
for (size_t i = 0; i < n_uniques; ++i) {
ASSERT_EQ(x[i], values[i]);
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false);
TestCategoricalSketch(1000, 256, 32, true);
}
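// Sketch one categorical and one numerical feature together; the categorical
// feature contributes one cut per category on top of the numerical bins.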
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, float(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = ::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat =
GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
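// Sketch from a device adapter without weights and return the resulting cuts.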
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
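// Sketch from the adapter and validate the resulting cuts against the
// reference DMatrix.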
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, int num_columns, int num_rows,
DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = -1.0;
thrust::device_vector<float> data(rows * cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float>{1.0, 2.0, 3.0, 4.0, 5.0};
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, adapter.NumColumns(),
adapter.NumRows(), dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
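// Uniform weights with group information should produce the same cuts as the
// unweighted sketch.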
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->Info().SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
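// Sketch from a CuPy adapter with unit weights (per row or per group) and
// validate against an equivalent DMatrix.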
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo("weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
| 982534288c4158354236637f8db411ca4c9e2e33.cu | #include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <xgboost/data.h>
#include <xgboost/c_api.h>
#include "test_hist_util.h"
#include "../helpers.h"
#include "../data/test_array_interface.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/common/math.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../include/xgboost/logging.h"
namespace xgboost {
namespace common {
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins);
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
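// When the data does not fit in device memory, the sketch batch size should
// be capped at the number of elements that fit.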
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(cudaGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
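// Peak device memory used by sketching should stay within 5% of the
// detail::RequiredMemory estimate.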
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
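// Sketch a single categorical column (optionally weighted) and verify the cut
// values are exactly the sorted unique category values.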
void TestCategoricalSketch(size_t n, size_t num_categories, int32_t num_bins, bool weighted) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
dmat->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
dmat->Info().weights_.HostVector() = weights;
}
ASSERT_EQ(dmat->Info().feature_types.Size(), 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
std::sort(x.begin(), x.end());
auto n_uniques = std::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
for (size_t i = 0; i < n_uniques; ++i) {
ASSERT_EQ(x[i], values[i]);
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false);
TestCategoricalSketch(1000, 256, 32, true);
}
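// Sketch one categorical and one numerical feature together; the categorical
// feature contributes one cut per category on top of the numerical bins.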
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, float(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = std::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat =
GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
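// Sketch from a device adapter without weights and return the resulting cuts.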
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
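// Sketch from the adapter and validate the resulting cuts against the
// reference DMatrix.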
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, int num_columns, int num_rows,
DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = -1.0;
thrust::device_vector<float> data(rows * cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float>{1.0, 2.0, 3.0, 4.0, 5.0};
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, adapter.NumColumns(),
adapter.NumRows(), dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
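// Uniform weights with group information should produce the same cuts as the
// unweighted sketch.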
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->Info().SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
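// Sketch from a CuPy adapter with unit weights (per row or per group) and
// validate against an equivalent DMatrix.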
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo("weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
|
071bdc09962f4f515f7d59f73da8a0f66790e4f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| 071bdc09962f4f515f7d59f73da8a0f66790e4f2.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
system("pause");
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
95da753a1aa92b3110d1638ef877300f2daa5990.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/sequence.h>
#include <random>
#include <gmock/gmock.h>
template <typename T>
struct TypedColumnTest : public cudf::test::BaseFixture {
static std::size_t data_size() { return 1000; }
static std::size_t mask_size() { return 100; }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
TypedColumnTest()
: data{_num_elements * cudf::size_of(type())},
mask{cudf::bitmask_allocation_size_bytes(_num_elements)} {
auto typed_data = static_cast<char*>(data.data());
auto typed_mask = static_cast<char*>(mask.data());
thrust::sequence(thrust::device, typed_data, typed_data + data_size());
thrust::sequence(thrust::device, typed_mask, typed_mask + mask_size());
}
cudf::size_type num_elements() { return _num_elements; }
std::random_device r;
std::default_random_engine generator{r()};
std::uniform_int_distribution<cudf::size_type> distribution{200, 1000};
cudf::size_type _num_elements{distribution(generator)};
rmm::device_buffer data{};
rmm::device_buffer mask{};
rmm::device_buffer all_valid_mask{
create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)};
rmm::device_buffer all_null_mask{
create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)};
};
TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types<int32_t>);
/**---------------------------------------------------------------------------*
* @brief Verifies equality of the properties and data of a `column`'s views.
*
* @param col The `column` to verify
*---------------------------------------------------------------------------**/
void verify_column_views(cudf::column col) {
cudf::column_view view = col;
cudf::mutable_column_view mutable_view = col;
EXPECT_EQ(col.type(), view.type());
EXPECT_EQ(col.type(), mutable_view.type());
EXPECT_EQ(col.size(), view.size());
EXPECT_EQ(col.size(), mutable_view.size());
EXPECT_EQ(col.null_count(), view.null_count());
EXPECT_EQ(col.null_count(), mutable_view.null_count());
EXPECT_EQ(col.nullable(), view.nullable());
EXPECT_EQ(col.nullable(), mutable_view.nullable());
EXPECT_EQ(col.num_children(), view.num_children());
EXPECT_EQ(col.num_children(), mutable_view.num_children());
EXPECT_EQ(view.head(), mutable_view.head());
EXPECT_EQ(view.data<char>(), mutable_view.data<char>());
EXPECT_EQ(view.offset(), mutable_view.offset());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
rmm::device_buffer{}};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask, 0};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask, this->num_elements()};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) {
cudf::column col{this->type(), this->num_elements(), this->data};
rmm::device_buffer empty_null_mask{};
EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()),
cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) {
cudf::column col{this->type(), this->num_elements(), this->data};
auto invalid_size_null_mask =
create_null_mask(::min(this->num_elements() - 50, 0),
cudf::mask_state::ALL_VALID);
EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()),
cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
rmm::device_buffer{}};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_NO_THROW(col.set_null_count(0));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_NO_THROW(col.set_null_count(this->num_elements()));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_EQ(this->num_elements(), col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_EQ(0, col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, CopyDataNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
cudf::test::expect_equal_buffers(v.head(), this->data.data(),
this->data.size());
}
TYPED_TEST(TypedColumnTest, MoveDataNoMask) {
void* original_data = this->data.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
}
TYPED_TEST(TypedColumnTest, CopyDataAndMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
EXPECT_NE(v.null_mask(), this->all_valid_mask.data());
cudf::test::expect_equal_buffers(v.head(), this->data.data(),
this->data.size());
cudf::test::expect_equal_buffers(v.null_mask(), this->all_valid_mask.data(),
this->mask.size());
}
TYPED_TEST(TypedColumnTest, MoveDataAndMask) {
void* original_data = this->data.data();
void* original_mask = this->all_valid_mask.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data),
std::move(this->all_valid_mask)};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
EXPECT_EQ(v.null_mask(), original_mask);
}
TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) {
cudf::column original{this->type(), this->num_elements(), this->data};
cudf::column copy{original};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
}
TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
cudf::column copy{original};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) {
cudf::column original{this->type(), this->num_elements(), this->data};
auto original_data = original.view().head();
cudf::column moved_to{std::move(original)};
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
}
TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
auto original_data = original.view().head();
auto original_mask = original.view().null_mask();
cudf::column moved_to{std::move(original)};
verify_column_views(moved_to);
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
TYPED_TEST(TypedColumnTest, ConstructWithChildren) {
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(
std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT8}, 42,
this->data, this->all_valid_mask));
children.emplace_back(
std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::FLOAT64},
314, this->data, this->all_valid_mask));
cudf::column col{
this->type(), this->num_elements(), this->data,
this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)};
verify_column_views(col);
EXPECT_EQ(2, col.num_children());
EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type());
EXPECT_EQ(42, col.child(0).size());
EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type());
EXPECT_EQ(314, col.child(1).size());
}
TYPED_TEST(TypedColumnTest, ReleaseNoChildren) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(0u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ReleaseWithChildren) {
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(std::make_unique<cudf::column>(
this->type(), this->num_elements(), this->data, this->all_valid_mask));
children.emplace_back(std::make_unique<cudf::column>(
this->type(), this->num_elements(), this->data, this->all_valid_mask));
cudf::column col{
this->type(), this->num_elements(), this->data,
this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(2u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
cudf::column_view original_view = original;
cudf::column copy{original_view};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
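// Slicing a column into views and concatenating them should reproduce the
// original column.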
TYPED_TEST(TypedColumnTest, ConcatenateColumnView) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->mask};
std::vector<cudf::size_type> indices{
0, this->num_elements()/3,
this->num_elements()/3, this->num_elements()/2,
this->num_elements()/2, this->num_elements()};
std::vector<cudf::column_view> views = cudf::experimental::slice(original, indices);
auto concatenated_col = cudf::concatenate(views);
cudf::test::expect_columns_equal(original, *concatenated_col);
}
struct StringColumnTest : public cudf::test::BaseFixture {};
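// Concatenating string column views should preserve element order and contents.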
TEST_F(StringColumnTest, ConcatenateColumnView) {
std::vector<const char*> h_strings{ "aaa", "bb", "", "cccc", "d", "", "ff", "gggg", "", "h", "iiii", "jjj", "k", "lllllll", "mmmmm", "n", "oo", "ppp" };
cudf::test::strings_column_wrapper strings1( h_strings.data(), h_strings.data()+6 );
cudf::test::strings_column_wrapper strings2( h_strings.data()+6, h_strings.data()+10 );
cudf::test::strings_column_wrapper strings3( h_strings.data()+10, h_strings.data()+h_strings.size() );
std::vector<cudf::column_view> strings_columns;
strings_columns.push_back(strings1);
strings_columns.push_back(strings2);
strings_columns.push_back(strings3);
auto results = cudf::concatenate(strings_columns);
cudf::test::strings_column_wrapper expected( h_strings.begin(), h_strings.end() );
cudf::test::expect_columns_equal(*results,expected);
}
| 95da753a1aa92b3110d1638ef877300f2daa5990.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/copying.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/type_list_utilities.hpp>
#include <tests/utilities/type_lists.hpp>
#include <thrust/sequence.h>
#include <random>
#include <gmock/gmock.h>
template <typename T>
struct TypedColumnTest : public cudf::test::BaseFixture {
static std::size_t data_size() { return 1000; }
static std::size_t mask_size() { return 100; }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
TypedColumnTest()
: data{_num_elements * cudf::size_of(type())},
mask{cudf::bitmask_allocation_size_bytes(_num_elements)} {
auto typed_data = static_cast<char*>(data.data());
auto typed_mask = static_cast<char*>(mask.data());
thrust::sequence(thrust::device, typed_data, typed_data + data_size());
thrust::sequence(thrust::device, typed_mask, typed_mask + mask_size());
}
cudf::size_type num_elements() { return _num_elements; }
std::random_device r;
std::default_random_engine generator{r()};
std::uniform_int_distribution<cudf::size_type> distribution{200, 1000};
cudf::size_type _num_elements{distribution(generator)};
rmm::device_buffer data{};
rmm::device_buffer mask{};
rmm::device_buffer all_valid_mask{
create_null_mask(num_elements(), cudf::mask_state::ALL_VALID)};
rmm::device_buffer all_null_mask{
create_null_mask(num_elements(), cudf::mask_state::ALL_NULL)};
};
TYPED_TEST_CASE(TypedColumnTest, cudf::test::Types<int32_t>);
/**---------------------------------------------------------------------------*
* @brief Verifies equality of the properties and data of a `column`'s views.
*
* @param col The `column` to verify
*---------------------------------------------------------------------------**/
void verify_column_views(cudf::column col) {
cudf::column_view view = col;
cudf::mutable_column_view mutable_view = col;
EXPECT_EQ(col.type(), view.type());
EXPECT_EQ(col.type(), mutable_view.type());
EXPECT_EQ(col.size(), view.size());
EXPECT_EQ(col.size(), mutable_view.size());
EXPECT_EQ(col.null_count(), view.null_count());
EXPECT_EQ(col.null_count(), mutable_view.null_count());
EXPECT_EQ(col.nullable(), view.nullable());
EXPECT_EQ(col.nullable(), mutable_view.nullable());
EXPECT_EQ(col.num_children(), view.num_children());
EXPECT_EQ(col.num_children(), mutable_view.num_children());
EXPECT_EQ(view.head(), mutable_view.head());
EXPECT_EQ(view.data<char>(), mutable_view.data<char>());
EXPECT_EQ(view.offset(), mutable_view.offset());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountEmptyMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
rmm::device_buffer{}};
EXPECT_FALSE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask, 0};
EXPECT_TRUE(col.nullable());
EXPECT_FALSE(col.has_nulls());
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, DefaultNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ExplicitNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask, this->num_elements()};
EXPECT_TRUE(col.nullable());
EXPECT_TRUE(col.has_nulls());
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetEmptyNullMaskNonZeroNullCount) {
cudf::column col{this->type(), this->num_elements(), this->data};
rmm::device_buffer empty_null_mask{};
EXPECT_THROW(col.set_null_mask(empty_null_mask, this->num_elements()),
cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetInvalidSizeNullMaskNonZeroNullCount) {
cudf::column col{this->type(), this->num_elements(), this->data};
auto invalid_size_null_mask =
create_null_mask(std::min(this->num_elements() - 50, 0),
cudf::mask_state::ALL_VALID);
EXPECT_THROW(col.set_null_mask(invalid_size_null_mask, this->num_elements()),
cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountEmptyMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
rmm::device_buffer{}};
EXPECT_THROW(col.set_null_count(1), cudf::logic_error);
}
TYPED_TEST(TypedColumnTest, SetNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_NO_THROW(col.set_null_count(0));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, SetNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_NO_THROW(col.set_null_count(this->num_elements()));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllNull) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_null_mask};
EXPECT_EQ(this->num_elements(), col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(this->num_elements(), col.null_count());
}
TYPED_TEST(TypedColumnTest, ResetNullCountAllValid) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_EQ(0, col.null_count());
EXPECT_NO_THROW(col.set_null_count(cudf::UNKNOWN_NULL_COUNT));
EXPECT_EQ(0, col.null_count());
}
TYPED_TEST(TypedColumnTest, CopyDataNoMask) {
cudf::column col{this->type(), this->num_elements(), this->data};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
cudf::test::expect_equal_buffers(v.head(), this->data.data(),
this->data.size());
}
TYPED_TEST(TypedColumnTest, MoveDataNoMask) {
void* original_data = this->data.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data)};
EXPECT_EQ(this->type(), col.type());
EXPECT_FALSE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
}
TYPED_TEST(TypedColumnTest, CopyDataAndMask) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify deep copy
cudf::column_view v = col;
EXPECT_NE(v.head(), this->data.data());
EXPECT_NE(v.null_mask(), this->all_valid_mask.data());
cudf::test::expect_equal_buffers(v.head(), this->data.data(),
this->data.size());
cudf::test::expect_equal_buffers(v.null_mask(), this->all_valid_mask.data(),
this->mask.size());
}
TYPED_TEST(TypedColumnTest, MoveDataAndMask) {
void* original_data = this->data.data();
void* original_mask = this->all_valid_mask.data();
cudf::column col{this->type(), this->num_elements(), std::move(this->data),
std::move(this->all_valid_mask)};
EXPECT_EQ(this->type(), col.type());
EXPECT_TRUE(col.nullable());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(this->num_elements(), col.size());
EXPECT_EQ(0, col.num_children());
verify_column_views(col);
// Verify shallow copy
cudf::column_view v = col;
EXPECT_EQ(v.head(), original_data);
EXPECT_EQ(v.null_mask(), original_mask);
}
TYPED_TEST(TypedColumnTest, CopyConstructorNoMask) {
cudf::column original{this->type(), this->num_elements(), this->data};
cudf::column copy{original};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
}
TYPED_TEST(TypedColumnTest, CopyConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
cudf::column copy{original};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view original_view = original;
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
TYPED_TEST(TypedColumnTest, MoveConstructorNoMask) {
cudf::column original{this->type(), this->num_elements(), this->data};
auto original_data = original.view().head();
cudf::column moved_to{std::move(original)};
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
verify_column_views(moved_to);
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
}
TYPED_TEST(TypedColumnTest, MoveConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
auto original_data = original.view().head();
auto original_mask = original.view().null_mask();
cudf::column moved_to{std::move(original)};
verify_column_views(moved_to);
EXPECT_EQ(0, original.size());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, original.type());
// Verify move
cudf::column_view moved_to_view = moved_to;
EXPECT_EQ(original_data, moved_to_view.head());
EXPECT_EQ(original_mask, moved_to_view.null_mask());
}
TYPED_TEST(TypedColumnTest, ConstructWithChildren) {
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(
std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT8}, 42,
this->data, this->all_valid_mask));
children.emplace_back(
std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::FLOAT64},
314, this->data, this->all_valid_mask));
cudf::column col{
this->type(), this->num_elements(), this->data,
this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)};
verify_column_views(col);
EXPECT_EQ(2, col.num_children());
EXPECT_EQ(cudf::data_type{cudf::type_id::INT8}, col.child(0).type());
EXPECT_EQ(42, col.child(0).size());
EXPECT_EQ(cudf::data_type{cudf::type_id::FLOAT64}, col.child(1).type());
EXPECT_EQ(314, col.child(1).size());
}
TYPED_TEST(TypedColumnTest, ReleaseNoChildren) {
cudf::column col{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(0u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ReleaseWithChildren) {
std::vector<std::unique_ptr<cudf::column>> children;
children.emplace_back(std::make_unique<cudf::column>(
this->type(), this->num_elements(), this->data, this->all_valid_mask));
children.emplace_back(std::make_unique<cudf::column>(
this->type(), this->num_elements(), this->data, this->all_valid_mask));
cudf::column col{
this->type(), this->num_elements(), this->data,
this->all_valid_mask, cudf::UNKNOWN_NULL_COUNT, std::move(children)};
auto original_data = col.view().head();
auto original_mask = col.view().null_mask();
cudf::column::contents contents = col.release();
EXPECT_EQ(original_data, contents.data->data());
EXPECT_EQ(original_mask, contents.null_mask->data());
EXPECT_EQ(2u, contents.children.size());
EXPECT_EQ(0, col.size());
EXPECT_EQ(0, col.null_count());
EXPECT_EQ(cudf::data_type{cudf::type_id::EMPTY}, col.type());
EXPECT_EQ(0, col.num_children());
}
TYPED_TEST(TypedColumnTest, ColumnViewConstructorWithMask) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->all_valid_mask};
cudf::column_view original_view = original;
cudf::column copy{original_view};
verify_column_views(copy);
cudf::test::expect_columns_equal(original, copy);
// Verify deep copy
cudf::column_view copy_view = copy;
EXPECT_NE(original_view.head(), copy_view.head());
EXPECT_NE(original_view.null_mask(), copy_view.null_mask());
}
TYPED_TEST(TypedColumnTest, ConcatenateColumnView) {
cudf::column original{this->type(), this->num_elements(), this->data,
this->mask};
std::vector<cudf::size_type> indices{
0, this->num_elements()/3,
this->num_elements()/3, this->num_elements()/2,
this->num_elements()/2, this->num_elements()};
std::vector<cudf::column_view> views = cudf::experimental::slice(original, indices);
auto concatenated_col = cudf::concatenate(views);
cudf::test::expect_columns_equal(original, *concatenated_col);
}
struct StringColumnTest : public cudf::test::BaseFixture {};
TEST_F(StringColumnTest, ConcatenateColumnView) {
std::vector<const char*> h_strings{ "aaa", "bb", "", "cccc", "d", "ééé", "ff", "gggg", "", "h", "iiii", "jjj", "k", "lllllll", "mmmmm", "n", "oo", "ppp" };
cudf::test::strings_column_wrapper strings1( h_strings.data(), h_strings.data()+6 );
cudf::test::strings_column_wrapper strings2( h_strings.data()+6, h_strings.data()+10 );
cudf::test::strings_column_wrapper strings3( h_strings.data()+10, h_strings.data()+h_strings.size() );
std::vector<cudf::column_view> strings_columns;
strings_columns.push_back(strings1);
strings_columns.push_back(strings2);
strings_columns.push_back(strings3);
auto results = cudf::concatenate(strings_columns);
cudf::test::strings_column_wrapper expected( h_strings.begin(), h_strings.end() );
cudf::test::expect_columns_equal(*results,expected);
}
|
ba65c58fde2bfcc92c41043471032381a544e24a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass: checks whether the parameter is passed successfully
//--blockDim=1024 --gridDim=1 --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h> /* needed for the device-side assert used in foo() */
#define N 8
__device__ float multiplyByTwo(float *v, unsigned int tid) {
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid) {
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foo(float *v, funcType* f, unsigned int size)
{
//*** __requires(f == multiplyByTwo | f == divideByTwo); ****/
/************************************************************/
assert(*f == divideByTwo || *f == multiplyByTwo);
/************************************************************/
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
v[tid] = (*f)(v, tid);
}
}
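// Note: main() below fills the funcType pointer from host code before copying it to the
// device, and the //--blockDim/--gridDim/--no-inline line at the top suggests this file is
// meant as input for a GPU verification tool; the commented-out __requires(...) clause and
// the assert above constrain which of the two device functions the pointer may refer to.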
int main (){
float* w;
float* dev_w;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
hipMalloc((void**)&dev_w, size);
hipMemcpy(dev_w,w, size,hipMemcpyHostToDevice);
funcType* g;
funcType* dev_g;
g =(funcType*) malloc(sizeof(funcType));
//*g = multiplyByTwo;
*g = divideByTwo;
hipMalloc((void**)&dev_g, sizeof(funcType));
hipMemcpy(dev_g, g, sizeof(funcType),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo) , dim3(1),dim3(N), 0, 0, dev_w, dev_g, N );
hipMemcpy(w,dev_w,size,hipMemcpyDeviceToHost);
hipMemcpy(g,dev_g,sizeof(funcType),hipMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
//printf ("\n (float) functype: %f", divideByTwo);
free(w);
hipFree(dev_w);
hipFree(dev_g);
return 0;
}
 | ba65c58fde2bfcc92c41043471032381a544e24a.cu | //pass: checks whether the parameter is passed successfully
//--blockDim=1024 --gridDim=1 --no-inline
#include <stdio.h>
#include <stdlib.h>
#include <assert.h> /* needed for the device-side assert used in foo() */
#define N 8
__device__ float multiplyByTwo(float *v, unsigned int tid) {
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid) {
return v[tid] * 0.5f;
}
typedef float(*funcType)(float*, unsigned int);
__global__ void foo(float *v, funcType* f, unsigned int size)
{
//*** __requires(f == multiplyByTwo | f == divideByTwo); ****/
/************************************************************/
assert(*f == divideByTwo || *f == multiplyByTwo);
/************************************************************/
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size) {
v[tid] = (*f)(v, tid);
}
}
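// Note: the assert above, together with the commented-out __requires(...) clause and the
// //--blockDim/--gridDim/--no-inline line at the top of the file, points to this test being
// written for a GPU verification tool; main() below assigns one of the two device functions
// to the pointer on the host and copies it to the device before launching foo.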
int main (){
float* w;
float* dev_w;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
cudaMalloc((void**)&dev_w, size);
cudaMemcpy(dev_w,w, size,cudaMemcpyHostToDevice);
funcType* g;
funcType* dev_g;
g =(funcType*) malloc(sizeof(funcType));
//*g = multiplyByTwo;
*g = divideByTwo;
cudaMalloc((void**)&dev_g, sizeof(funcType));
cudaMemcpy(dev_g, g, sizeof(funcType),cudaMemcpyHostToDevice);
foo <<<1,N>>>(dev_w, dev_g, N );
cudaMemcpy(w,dev_w,size,cudaMemcpyDeviceToHost);
cudaMemcpy(g,dev_g,sizeof(funcType),cudaMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
//printf ("\n (float) functype: %f", divideByTwo);
free(w);
cudaFree(dev_w);
cudaFree(dev_g);
return 0;
}
|
5afde5fa879da83b3786ee248e9392aa78a304fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "softmax_kernel_new_api.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int batch = 2;
int batch_offset = 2;
int groups = 1;
int group_offset = 1;
int stride = 2;
float temp = 1;
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
softmax_kernel_new_api), dim3(gridBlock),dim3(threadBlock), 0, 0, input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
softmax_kernel_new_api), dim3(gridBlock),dim3(threadBlock), 0, 0, input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
softmax_kernel_new_api), dim3(gridBlock),dim3(threadBlock), 0, 0, input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5afde5fa879da83b3786ee248e9392aa78a304fc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "softmax_kernel_new_api.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int batch = 2;
int batch_offset = 2;
int groups = 1;
int group_offset = 1;
int stride = 2;
float temp = 1;
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
softmax_kernel_new_api<<<gridBlock,threadBlock>>>(input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
softmax_kernel_new_api<<<gridBlock,threadBlock>>>(input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
softmax_kernel_new_api<<<gridBlock,threadBlock>>>(input,n,batch,batch_offset,groups,group_offset,stride,temp,output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
45cc23130b819ba7aa2da35980ede2f4001dfe55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define CBLACK "\33[30m"
#define CRED "\33[31m"
#define CGREEN "\33[32m"
#define CWHITE "\33[37m"
#define SIT_SIZE 500
#define NBR_COIN 162
#define NBR_COIN_CUDA 162
#define NBR_BLOCK 1024
#define NBR_HIGH_SCORE 50
#define MIN_PRICE 0.000620
#define TIME_GUESS 100
#define COIN_TEST 98
#define AMOUNT_BET 100
#define MIN_POURCENT_GUESS 0.001
#define NBR_MINUTES 881003
#define AMOUNT_TEST 881003
typedef struct {
double open;
double high;
double low;
double close;
double volume;
} Data;
typedef struct {
double time;
Data data[NBR_COIN];
} Minute;
typedef struct {
int score;
int minuteId;
int coinId;
} Score;
typedef struct {
Score highScores[NBR_HIGH_SCORE];
double *guessed;
/**Cuda memory */
Minute **minutes; // all history
Minute **srcPourcent;
int *scores;
} Env;
typedef struct {
int cursor;
int coinId;
} Situation;
Env env;
/**
* Clear visual field
*/
void clear() { dprintf(1, "#CLS\n"); }
/**
* Launch the great machine comparator
* Compares the percentage-normalized source situation against every other minute window: the score
* sums the absolute differences of the open-price percentages over SIT_SIZE minutes, so a lower
* score means a closer match
*/
__global__ void bake(Minute **source, int sourceCoinId, int cursor,
Minute **minutes, int *scores) {
int coinId = threadIdx.x;
int minuteId = blockIdx.x;
double score = 0;
if (minutes[cursor + minuteId]->data[coinId].open < MIN_PRICE) {
scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
return;
}
for (int i = 0; i < SIT_SIZE; i++) {
if (minutes[cursor + minuteId + i]->data[coinId].open == -1) {
scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
return;
}
double pourcent = minutes[cursor + minuteId + i]->data[coinId].open /
minutes[cursor + minuteId]->data[coinId].open * 100;
score +=
fabs(fabs(source[i]->data[sourceCoinId].open) - fabs(pourcent));
}
// printf("score : %12lf coinId: %4d minuteId : %3d test: %lf \n", score,
// coinId, minuteId + cursor,
// minutes[minuteId + cursor]->data[coinId].open);
scores[NBR_COIN_CUDA * minuteId + coinId] = score;
}
/**
* Generate a random number
*/
int random_number(int min_num, int max_num) {
int result = (rand() % (max_num - min_num)) + min_num;
return result;
}
/**
* Load history in RAM and VRAM
*/
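/* Note: the allocations below use hipMallocManaged (unified memory), so the Minute records
 * read from disk are directly addressable from both the host code and the bake kernel
 * without explicit hipMemcpy transfers. */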
Minute **loadHistory(int start, int amount) {
int fd = open("../data/bin/full", O_RDONLY);
Minute **minutes;
hipMallocManaged(&minutes, sizeof(void *) * amount);
int i = -1;
while (1) {
i++;
if (i >= amount) break; /* stay within the 'amount' pointer slots allocated above */
hipMallocManaged(&minutes[i], sizeof(Minute));
if (read(fd, minutes[i], sizeof(Minute)) < 1) break;
}
return minutes;
}
/**
* Transform every value of a situation to a percentage of its value at the first minute of the window
*/
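/* Worked example (illustrative numbers): if a coin's open price is 2.0 at the start of the
 * window (minute 'cursor') and 2.5 at minute cursor+i, the value stored for that minute is
 * 2.5 / 2.0 * 100 = 125.0; each price field (open/high/low/close) is thus expressed as a
 * percentage of its value at the first minute of the window. */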
Minute **SituationToPourcent(int cursor) {
for (int i = 0; i < SIT_SIZE; i++) {
env.srcPourcent[i]->time = env.minutes[cursor + i]->time;
for (int coinIndex = 0; coinIndex < NBR_COIN_CUDA; coinIndex++) {
env.srcPourcent[i]->data[coinIndex].close =
env.minutes[cursor + i]->data[coinIndex].close /
env.minutes[cursor]->data[coinIndex].close * 100;
env.srcPourcent[i]->data[coinIndex].high =
env.minutes[cursor + i]->data[coinIndex].high /
env.minutes[cursor]->data[coinIndex].high * 100;
env.srcPourcent[i]->data[coinIndex].low =
env.minutes[cursor + i]->data[coinIndex].low /
env.minutes[cursor]->data[coinIndex].low * 100;
env.srcPourcent[i]->data[coinIndex].open =
env.minutes[cursor + i]->data[coinIndex].open /
env.minutes[cursor]->data[coinIndex].open * 100;
env.srcPourcent[i]->data[coinIndex].volume =
env.minutes[cursor + i]->data[coinIndex].volume /
env.minutes[cursor + i]->data[coinIndex].volume * 100;
}
}
return env.srcPourcent;
}
/**
* Export situation to external program
*/
void printSituation(int cursor, int coinId) {
dprintf(2, "sit : %lf coinId : %d\n", env.minutes[cursor]->time, coinId);
dprintf(1, "#SIT");
for (int i = 0; i < SIT_SIZE * 2; i++) {
dprintf(2, " %lf", env.minutes[i + cursor]->data[coinId].open);
dprintf(1, " %lf", env.minutes[i + cursor]->data[coinId].open);
}
dprintf(1, "\n");
}
/**
* Compare Given situation with all history
*/
void bakeSituation(int cursor, int baseCoinId) {
// score
int *scores = env.scores;
int baseCursor = cursor;
Minute **pourcent = SituationToPourcent(cursor);
// cursor += SIT_SIZE; // avoiding compare source situation
cursor = 0;
for (int hi = 0; hi < NBR_HIGH_SCORE; hi++) {
env.highScores[hi].score = 99999999;
env.highScores[hi].minuteId = 0;
env.highScores[hi].coinId = 0;
}
for (int bakeIndex = 0; cursor < 870000; bakeIndex++) {
hipLaunchKernelGGL(( bake), dim3(NBR_BLOCK), dim3(NBR_COIN_CUDA), 0, 0, pourcent, baseCoinId, cursor,
env.minutes, scores);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
for (int i = 0; i < NBR_BLOCK * NBR_COIN_CUDA; i++) {
if (scores[i] != -1) {
int minuteId = i / NBR_COIN;
int coinId = i % NBR_COIN;
if (abs((minuteId + cursor) - baseCursor) < (SIT_SIZE * 5)) {
continue;
}
// dprintf(2,
// "score : %12d coinId: %4d minuteid : %3d test:
// %lf\n", scores[i], coinId, minuteId + cursor,
// env.minutes[minuteId + cursor]->data[coinId].open);
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE;
highIndex++) {
if (scores[i] < env.highScores[highIndex].score) {
env.highScores[highIndex].score = scores[i];
env.highScores[highIndex].minuteId = minuteId + cursor;
env.highScores[highIndex].coinId = coinId;
i += NBR_COIN_CUDA * 50;
break;
}
}
// if (found) {
// break;
// }
// if (scores[i] < 47) {
// dprintf(2, "score : %d coinId : %d\n time :", scores[i],
// coinId);
// printSituation(minuteId + cursor, coinId);
// // getchar();
// break;
// }
}
}
cursor += NBR_BLOCK;
// if (cursor % 100 == 0) {
// // dprintf(2, "cursor : %d\n", cursor);
// // getchar();
// }
// getchar();
}
// dprintf(2, "Done\n");
// getchar();
// clear();
// for (int highIndex = 0; highIndex < NBR_HIGH_SCORE - 1; highIndex++) {
// getchar();
// printSituation(env.highScores[highIndex].minuteId,
// env.highScores[highIndex].coinId);
// }
}
/**
* Return the guessed percentage of change from situation to TIME_GUESS
*/
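/* Worked example (illustrative numbers): for one matched situation, if the open price is
 * 100.0 at the end of its window (start) and 110.0 TIME_GUESS minutes later (end), its
 * contribution is 100 - (100.0 / 110.0 * 100) ~ 9.09, the change expressed relative to the
 * later price; the returned prediction is the average of this quantity over the
 * NBR_HIGH_SCORE best matches. */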
double makeNextGuess() {
double pred = 0;
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
// env.highScores[highIndex].minuteId + SIT_SIZE;
// env.highScores[highIndex].coinId;
double start =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
->data[env.highScores[highIndex].coinId]
.open;
double end = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE +
TIME_GUESS]
->data[env.highScores[highIndex].coinId]
.open;
pred += 100 - (start / end * 100);
}
pred = pred / NBR_HIGH_SCORE;
return pred;
}
/**
* Get the real percentage change that followed the given situation
*/
double getRealNext(int minuteId, int coinId) {
double start = env.minutes[minuteId + SIT_SIZE]->data[coinId].open;
double end =
env.minutes[minuteId + SIT_SIZE + TIME_GUESS]->data[coinId].open;
return 100 - (start / end * 100);
}
// /**
// * do something with the score of a minute
// */
// void onScore() {}
void initMem() {
hipMallocManaged(&env.srcPourcent, sizeof(void *) * SIT_SIZE);
for (int i = 0; i < SIT_SIZE; i++) {
hipMallocManaged(&env.srcPourcent[i], sizeof(Minute));
}
hipMallocManaged(&env.scores, sizeof(int) * NBR_BLOCK * NBR_COIN);
env.guessed = (double *)malloc(sizeof(double) * SIT_SIZE);
}
Situation getRandomSituation() {
Situation res;
int last = 0;
while (1) {
res.cursor = random_number(200000, NBR_MINUTES - 1000);
last = res.cursor;
res.coinId = random_number(0, NBR_COIN_CUDA);
if (env.minutes[res.cursor]->data[res.coinId].open != -1 &&
env.minutes[res.cursor]->data[res.coinId].open > MIN_PRICE) {
return res;
}
usleep(1000);
}
}
void printInfos(Situation sit) {
printf("%d;%d(", sit.coinId, sit.cursor);
for (int i = 20; i < 220; i += 20) {
double start =
env.minutes[sit.cursor + SIT_SIZE]->data[sit.coinId].open;
double end =
env.minutes[sit.cursor + SIT_SIZE + i]->data[sit.coinId].open;
double pred = 100 - (start / end * 100);
printf("%lf;", pred);
}
printf(")-->");
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
// env.highScores[highIndex].minuteId + SIT_SIZE;
// env.highScores[highIndex].coinId;
printf("%d;%d(", env.highScores[highIndex].coinId,
env.highScores[highIndex].minuteId);
for (int i = 20; i < 220; i += 20) {
double start =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
->data[env.highScores[highIndex].coinId]
.open;
double end =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + i]
->data[env.highScores[highIndex].coinId]
.open;
double pred = 100 - (start / end * 100);
printf("%lf;", pred);
}
printf(")|");
}
printf("\n");
}
int main() {
srand(time(NULL));
env.minutes = loadHistory(0, AMOUNT_TEST);
initMem();
int cur = 0;
double bank = 1000;
while (1) {
// dprintf(2, "ready\n");
// int cursor = 397100 + cur;
// int cursor = random_number(397100, 500000);
// clear();
// printSituation(cursor, COIN_TEST);
// dprintf(2, "READY\n");
Situation sit = getRandomSituation();
bakeSituation(sit.cursor, sit.coinId);
printInfos(sit);
// double pred = makeNextGuess();
// double real = getRealNext(sit.cursor, sit.coinId);
// if (abs(real) > 5) {
// continue;
// }
// printf(
// "Time : %d | Cursor : %8d | CoinId : %4d | Pred : %10lf | Real :
// "
// "%10lf | BANK : %12lf |",
// (int)env.minutes[sit.cursor + SIT_SIZE]->time, sit.cursor,
// sit.coinId, pred, real, bank);
// if (abs(pred) > MIN_POURCENT_GUESS) {
// if (pred * real > 0) {
// bank += abs(real) * AMOUNT_BET * 0.01;
// printf("%sWON %s ", CGREEN, CWHITE);
// bank += -(AMOUNT_BET * 0.002);
// } else {
// printf("%sLOST %s ", CRED, CWHITE);
// bank -= abs(real) * AMOUNT_BET * 0.01;
// bank += -(AMOUNT_BET * 0.002);
// }
// }
// printf("\n");
// fflush(stdout);
// exit(0);
// cur += SIT_SIZE / 2;
}
return 0;
} | 45cc23130b819ba7aa2da35980ede2f4001dfe55.cu | #include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define CBLACK "\33[30m"
#define CRED "\33[31m"
#define CGREEN "\33[32m"
#define CWHITE "\33[37m"
#define SIT_SIZE 500
#define NBR_COIN 162
#define NBR_COIN_CUDA 162
#define NBR_BLOCK 1024
#define NBR_HIGH_SCORE 50
#define MIN_PRICE 0.000620
#define TIME_GUESS 100
#define COIN_TEST 98
#define AMOUNT_BET 100
#define MIN_POURCENT_GUESS 0.001
#define NBR_MINUTES 881003
#define AMOUNT_TEST 881003
typedef struct {
double open;
double high;
double low;
double close;
double volume;
} Data;
typedef struct {
double time;
Data data[NBR_COIN];
} Minute;
typedef struct {
int score;
int minuteId;
int coinId;
} Score;
typedef struct {
Score highScores[NBR_HIGH_SCORE];
double *guessed;
/**Cuda memory */
Minute **minutes; // all history
Minute **srcPourcent;
int *scores;
} Env;
typedef struct {
int cursor;
int coinId;
} Situation;
Env env;
/**
* Clear visual field
*/
void clear() { dprintf(1, "#CLS\n"); }
/**
* Launch the great machine comparator
* Compares the percentage-normalized source situation with every other minute window; the score is
* the sum of absolute differences of the open-price percentages over SIT_SIZE minutes (lower score =
* closer match)
*/
__global__ void bake(Minute **source, int sourceCoinId, int cursor,
Minute **minutes, int *scores) {
int coinId = threadIdx.x;
int minuteId = blockIdx.x;
double score = 0;
if (minutes[cursor + minuteId]->data[coinId].open < MIN_PRICE) {
scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
return;
}
for (int i = 0; i < SIT_SIZE; i++) {
if (minutes[cursor + minuteId + i]->data[coinId].open == -1) {
scores[NBR_COIN_CUDA * minuteId + coinId] = -1;
return;
}
double pourcent = minutes[cursor + minuteId + i]->data[coinId].open /
minutes[cursor + minuteId]->data[coinId].open * 100;
score +=
fabs(fabs(source[i]->data[sourceCoinId].open) - fabs(pourcent));
}
// printf("score : %12lf coinId: %4d minuteId : %3d test: %lf \n", score,
// coinId, minuteId + cursor,
// minutes[minuteId + cursor]->data[coinId].open);
scores[NBR_COIN_CUDA * minuteId + coinId] = score;
}
/**
* Generate a random number
*/
int random_number(int min_num, int max_num) {
int result = (rand() % (max_num - min_num)) + min_num;
return result;
}
/**
* Load history in RAM and VRAM
*/
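/* Note: the allocations below use cudaMallocManaged (unified memory), so the Minute records
 * read from disk can be touched by both the host code and the bake kernel without explicit
 * cudaMemcpy transfers. */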
Minute **loadHistory(int start, int amount) {
int fd = open("../data/bin/full", O_RDONLY);
Minute **minutes;
cudaMallocManaged(&minutes, sizeof(void *) * amount);
int i = -1;
while (1) {
i++;
if (i >= amount) break; /* stay within the 'amount' pointer slots allocated above */
cudaMallocManaged(&minutes[i], sizeof(Minute));
if (read(fd, minutes[i], sizeof(Minute)) < 1) break;
}
return minutes;
}
/**
* Transform every value of a situation to a percentage of its value at the first minute of the window
*/
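/* Worked example (illustrative numbers): with an open price of 2.0 at minute 'cursor' and
 * 2.5 at minute cursor+i, the stored value is 2.5 / 2.0 * 100 = 125.0, i.e. each price field
 * (open/high/low/close) becomes a percentage of its value at the first minute of the window. */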
Minute **SituationToPourcent(int cursor) {
for (int i = 0; i < SIT_SIZE; i++) {
env.srcPourcent[i]->time = env.minutes[cursor + i]->time;
for (int coinIndex = 0; coinIndex < NBR_COIN_CUDA; coinIndex++) {
env.srcPourcent[i]->data[coinIndex].close =
env.minutes[cursor + i]->data[coinIndex].close /
env.minutes[cursor]->data[coinIndex].close * 100;
env.srcPourcent[i]->data[coinIndex].high =
env.minutes[cursor + i]->data[coinIndex].high /
env.minutes[cursor]->data[coinIndex].high * 100;
env.srcPourcent[i]->data[coinIndex].low =
env.minutes[cursor + i]->data[coinIndex].low /
env.minutes[cursor]->data[coinIndex].low * 100;
env.srcPourcent[i]->data[coinIndex].open =
env.minutes[cursor + i]->data[coinIndex].open /
env.minutes[cursor]->data[coinIndex].open * 100;
env.srcPourcent[i]->data[coinIndex].volume =
env.minutes[cursor + i]->data[coinIndex].volume /
env.minutes[cursor + i]->data[coinIndex].volume * 100;
}
}
return env.srcPourcent;
}
/**
* Export situation to external program
*/
void printSituation(int cursor, int coinId) {
dprintf(2, "sit : %lf coinId : %d\n", env.minutes[cursor]->time, coinId);
dprintf(1, "#SIT");
for (int i = 0; i < SIT_SIZE * 2; i++) {
dprintf(2, " %lf", env.minutes[i + cursor]->data[coinId].open);
dprintf(1, " %lf", env.minutes[i + cursor]->data[coinId].open);
}
dprintf(1, "\n");
}
/**
* Compare Given situation with all history
*/
void bakeSituation(int cursor, int baseCoinId) {
// score
int *scores = env.scores;
int baseCursor = cursor;
Minute **pourcent = SituationToPourcent(cursor);
// cursor += SIT_SIZE; // avoiding compare source situation
cursor = 0;
for (int hi = 0; hi < NBR_HIGH_SCORE; hi++) {
env.highScores[hi].score = 99999999;
env.highScores[hi].minuteId = 0;
env.highScores[hi].coinId = 0;
}
for (int bakeIndex = 0; cursor < 870000; bakeIndex++) {
bake<<<NBR_BLOCK, NBR_COIN_CUDA>>>(pourcent, baseCoinId, cursor,
env.minutes, scores);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
for (int i = 0; i < NBR_BLOCK * NBR_COIN_CUDA; i++) {
if (scores[i] != -1) {
int minuteId = i / NBR_COIN;
int coinId = i % NBR_COIN;
if (abs((minuteId + cursor) - baseCursor) < (SIT_SIZE * 5)) {
continue;
}
// dprintf(2,
// "score : %12d coinId: %4d minuteid : %3d test:
// %lf\n", scores[i], coinId, minuteId + cursor,
// env.minutes[minuteId + cursor]->data[coinId].open);
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE;
highIndex++) {
if (scores[i] < env.highScores[highIndex].score) {
env.highScores[highIndex].score = scores[i];
env.highScores[highIndex].minuteId = minuteId + cursor;
env.highScores[highIndex].coinId = coinId;
i += NBR_COIN_CUDA * 50;
break;
}
}
// if (found) {
// break;
// }
// if (scores[i] < 47) {
// dprintf(2, "score : %d coinId : %d\n time :", scores[i],
// coinId);
// printSituation(minuteId + cursor, coinId);
// // getchar();
// break;
// }
}
}
cursor += NBR_BLOCK;
// if (cursor % 100 == 0) {
// // dprintf(2, "cursor : %d\n", cursor);
// // getchar();
// }
// getchar();
}
// dprintf(2, "Done\n");
// getchar();
// clear();
// for (int highIndex = 0; highIndex < NBR_HIGH_SCORE - 1; highIndex++) {
// getchar();
// printSituation(env.highScores[highIndex].minuteId,
// env.highScores[highIndex].coinId);
// }
}
/**
* Return the guessed percentage of change from situation to TIME_GUESS
*/
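/* Worked example (illustrative numbers): a match whose open price moves from 100.0 at the end
 * of its window (start) to 110.0 TIME_GUESS minutes later (end) contributes
 * 100 - (100.0 / 110.0 * 100) ~ 9.09, the change expressed relative to the later price; the
 * prediction returned below averages this over the NBR_HIGH_SCORE best matches. */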
double makeNextGuess() {
double pred = 0;
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
// env.highScores[highIndex].minuteId + SIT_SIZE;
// env.highScores[highIndex].coinId;
double start =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
->data[env.highScores[highIndex].coinId]
.open;
double end = env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE +
TIME_GUESS]
->data[env.highScores[highIndex].coinId]
.open;
pred += 100 - (start / end * 100);
}
pred = pred / NBR_HIGH_SCORE;
return pred;
}
/**
* Get the real percentage change that followed the given situation
*/
double getRealNext(int minuteId, int coinId) {
double start = env.minutes[minuteId + SIT_SIZE]->data[coinId].open;
double end =
env.minutes[minuteId + SIT_SIZE + TIME_GUESS]->data[coinId].open;
return 100 - (start / end * 100);
}
// /**
// * do something with the score of a minute
// */
// void onScore() {}
void initMem() {
cudaMallocManaged(&env.srcPourcent, sizeof(void *) * SIT_SIZE);
for (int i = 0; i < SIT_SIZE; i++) {
cudaMallocManaged(&env.srcPourcent[i], sizeof(Minute));
}
cudaMallocManaged(&env.scores, sizeof(int) * NBR_BLOCK * NBR_COIN);
env.guessed = (double *)malloc(sizeof(double) * SIT_SIZE);
}
Situation getRandomSituation() {
Situation res;
int last = 0;
while (1) {
res.cursor = random_number(200000, NBR_MINUTES - 1000);
last = res.cursor;
res.coinId = random_number(0, NBR_COIN_CUDA);
if (env.minutes[res.cursor]->data[res.coinId].open != -1 &&
env.minutes[res.cursor]->data[res.coinId].open > MIN_PRICE) {
return res;
}
usleep(1000);
}
}
void printInfos(Situation sit) {
printf("%d;%d(", sit.coinId, sit.cursor);
for (int i = 20; i < 220; i += 20) {
double start =
env.minutes[sit.cursor + SIT_SIZE]->data[sit.coinId].open;
double end =
env.minutes[sit.cursor + SIT_SIZE + i]->data[sit.coinId].open;
double pred = 100 - (start / end * 100);
printf("%lf;", pred);
}
printf(")-->");
for (int highIndex = 0; highIndex < NBR_HIGH_SCORE; highIndex++) {
// env.highScores[highIndex].minuteId + SIT_SIZE;
// env.highScores[highIndex].coinId;
printf("%d;%d(", env.highScores[highIndex].coinId,
env.highScores[highIndex].minuteId);
for (int i = 20; i < 220; i += 20) {
double start =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE]
->data[env.highScores[highIndex].coinId]
.open;
double end =
env.minutes[env.highScores[highIndex].minuteId + SIT_SIZE + i]
->data[env.highScores[highIndex].coinId]
.open;
double pred = 100 - (start / end * 100);
printf("%lf;", pred);
}
printf(")|");
}
printf("\n");
}
int main() {
srand(time(NULL));
env.minutes = loadHistory(0, AMOUNT_TEST);
initMem();
int cur = 0;
double bank = 1000;
while (1) {
// dprintf(2, "ready\n");
// int cursor = 397100 + cur;
// int cursor = random_number(397100, 500000);
// clear();
// printSituation(cursor, COIN_TEST);
// dprintf(2, "READY\n");
Situation sit = getRandomSituation();
bakeSituation(sit.cursor, sit.coinId);
printInfos(sit);
// double pred = makeNextGuess();
// double real = getRealNext(sit.cursor, sit.coinId);
// if (abs(real) > 5) {
// continue;
// }
// printf(
// "Time : %d | Cursor : %8d | CoinId : %4d | Pred : %10lf | Real :
// "
// "%10lf | BANK : %12lf |",
// (int)env.minutes[sit.cursor + SIT_SIZE]->time, sit.cursor,
// sit.coinId, pred, real, bank);
// if (abs(pred) > MIN_POURCENT_GUESS) {
// if (pred * real > 0) {
// bank += abs(real) * AMOUNT_BET * 0.01;
// printf("%sWON %s ", CGREEN, CWHITE);
// bank += -(AMOUNT_BET * 0.002);
// } else {
// printf("%sLOST %s ", CRED, CWHITE);
// bank -= abs(real) * AMOUNT_BET * 0.01;
// bank += -(AMOUNT_BET * 0.002);
// }
// }
// printf("\n");
// fflush(stdout);
// exit(0);
// cur += SIT_SIZE / 2;
}
return 0;
} |
b817014f8011141f2bbda3b639ca5fd57d692d2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaLife.cuh"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
__global__ void cudaDecKernel(
hipSurfaceObject_t surfaceIn,
hipSurfaceObject_t surfaceOut,
unsigned int width,
unsigned int height
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
uchar4 cell;
surf2Dread(&cell, surfaceIn, x * 4, y);
if (cell.x != 0 && cell.x != 0x80)
cell = make_uchar4(cell.x - 1, cell.x - 1, cell.x - 1, 0xff);
surf2Dwrite(cell, surfaceOut, x * 4, y);
}
__global__ void cudaLifeKernel(
hipSurfaceObject_t surfaceIn,
hipSurfaceObject_t surfaceOut,
unsigned int width,
unsigned int height
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
int neighbors = 0;
if (x == 0 || x == width - 1 || y == 0 || y == height - 1) {
for (int yo = -1; yo <= 1; ++yo)
for (int xo = -1; xo <= 1; ++xo)
if (yo != 0 || xo != 0) {
int ix = x + xo;
int iy = y + yo;
if (ix < 0) ix += width;
if (ix >= width) ix -= width;
if (iy < 0) iy += height;
if (iy >= height) iy -= height;
uchar4 data;
surf2Dread(&data, surfaceIn, ix * 4, iy);
if (data.x > 0x7f)
++neighbors;
}
}
else
{
for (int yo = -1; yo <= 1; ++yo)
for (int xo = -1; xo <= 1; ++xo)
if (yo != 0 || xo != 0) {
int ix = x + xo;
int iy = y + yo;
uchar4 data;
surf2Dread(&data, surfaceIn, ix * 4, iy);
if (data.x > 0x7f)
++neighbors;
}
}
uchar4 cell;
surf2Dread(&cell, surfaceIn, x * 4, y);
uchar4 data;
if (neighbors == 2)
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
else if (neighbors == 3)
if (cell.x > 0x7f)
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
else
data = make_uchar4(0xff, 0xff, 0xff, 0xff);
else
if (cell.x > 0x7f)
data = make_uchar4(0x7f, 0x7f, 0x7f, 0xff);
else
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
surf2Dwrite(data, surfaceOut, x * 4, y);
}
extern "C" void doDecKernel(hipSurfaceObject_t surfaceIn, hipSurfaceObject_t surfaceOut, unsigned int width, unsigned int height) {
dim3 threads(32, 32);
dim3 blocks(width / threads.x, height / threads.y);
if (blocks.x * threads.x < width) ++blocks.x;
if (blocks.y * threads.y < height) ++blocks.y;
cudaDecKernel << <blocks, threads >> > (surfaceIn, surfaceOut, width, height);
}
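// Grid sizing note: the launchers above and below round the grid up so the whole surface is
// covered, e.g. for width = 100 and threads.x = 32 the integer division gives blocks.x = 3
// (96 threads), and the check bumps it to 4 (128 threads); the extra threads exit early via
// the x/y bounds test at the top of each kernel.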
extern "C" void doLifeKernel(hipSurfaceObject_t surfaceIn, hipSurfaceObject_t surfaceOut, unsigned int width, unsigned int height) {
dim3 threads(32, 32);
dim3 blocks(width / threads.x, height / threads.y);
if (blocks.x * threads.x < width) ++blocks.x;
if (blocks.y * threads.y < height) ++blocks.y;
cudaLifeKernel << <blocks, threads >> > (surfaceIn, surfaceOut, width, height);
} | b817014f8011141f2bbda3b639ca5fd57d692d2e.cu | #include "CudaLife.cuh"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
__global__ void cudaDecKernel(
cudaSurfaceObject_t surfaceIn,
cudaSurfaceObject_t surfaceOut,
unsigned int width,
unsigned int height
) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
uchar4 cell;
surf2Dread(&cell, surfaceIn, x * 4, y);
if (cell.x != 0 && cell.x != 0x80)
cell = make_uchar4(cell.x - 1, cell.x - 1, cell.x - 1, 0xff);
surf2Dwrite(cell, surfaceOut, x * 4, y);
}
__global__ void cudaLifeKernel(
cudaSurfaceObject_t surfaceIn,
cudaSurfaceObject_t surfaceOut,
unsigned int width,
unsigned int height
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
int neighbors = 0;
if (x == 0 || x == width - 1 || y == 0 || y == height - 1) {
for (int yo = -1; yo <= 1; ++yo)
for (int xo = -1; xo <= 1; ++xo)
if (yo != 0 || xo != 0) {
int ix = x + xo;
int iy = y + yo;
if (ix < 0) ix += width;
if (ix >= width) ix -= width;
if (iy < 0) iy += height;
if (iy >= height) iy -= height;
uchar4 data;
surf2Dread(&data, surfaceIn, ix * 4, iy);
if (data.x > 0x7f)
++neighbors;
}
}
else
{
for (int yo = -1; yo <= 1; ++yo)
for (int xo = -1; xo <= 1; ++xo)
if (yo != 0 || xo != 0) {
int ix = x + xo;
int iy = y + yo;
uchar4 data;
surf2Dread(&data, surfaceIn, ix * 4, iy);
if (data.x > 0x7f)
++neighbors;
}
}
uchar4 cell;
surf2Dread(&cell, surfaceIn, x * 4, y);
uchar4 data;
if (neighbors == 2)
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
else if (neighbors == 3)
if (cell.x > 0x7f)
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
else
data = make_uchar4(0xff, 0xff, 0xff, 0xff);
else
if (cell.x > 0x7f)
data = make_uchar4(0x7f, 0x7f, 0x7f, 0xff);
else
data = make_uchar4(cell.x, cell.x, cell.x, 0xff);
surf2Dwrite(data, surfaceOut, x * 4, y);
}
extern "C" void doDecKernel(cudaSurfaceObject_t surfaceIn, cudaSurfaceObject_t surfaceOut, unsigned int width, unsigned int height) {
dim3 threads(32, 32);
dim3 blocks(width / threads.x, height / threads.y);
if (blocks.x * threads.x < width) ++blocks.x;
if (blocks.y * threads.y < height) ++blocks.y;
cudaDecKernel << <blocks, threads >> > (surfaceIn, surfaceOut, width, height);
}
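// Grid sizing note: both launchers round the grid up to cover the whole surface, e.g. width = 100
// with threads.x = 32 yields blocks.x = 3 (96 threads) from the division, bumped to 4 (128 threads)
// by the check; threads that fall outside the surface return early via the bounds test at the top
// of each kernel.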
extern "C" void doLifeKernel(cudaSurfaceObject_t surfaceIn, cudaSurfaceObject_t surfaceOut, unsigned int width, unsigned int height) {
dim3 threads(32, 32);
dim3 blocks(width / threads.x, height / threads.y);
if (blocks.x * threads.x < width) ++blocks.x;
if (blocks.y * threads.y < height) ++blocks.y;
cudaLifeKernel << <blocks, threads >> > (surfaceIn, surfaceOut, width, height);
} |
3a53257c957b6ae69ab08f3c3004a64fbd27b906.hip | // !!! This is a file automatically generated by hipify!!!
/****************************************************************************
* Roy Wong
* Dan Rolfe
* Keri Anderson
*
* CS6235 CUDA Final Project
* Due April 2014
*
*
* To Run: ./connectivity
* To Compile: use "make" command (use makefile provided)
* Use gradlabX.eng.utah.edu, where X can be 1, 2, 3, 4, .... 13
* (not all extensions have CUDA installed)
*
*
*
* EMACS notes:
* To get syntax highlighting in Emacs: alt-x c-mode
* To get line numbers: alt-x global-linum-mode
*
* Needed files:
*
*
* PROGRAM DESCRIPTION:
* This program reads in NIFTI data, "cleans" the noise from it, and
* calculates how "connected" each point in the brain is to every other
* point in the brain.
*
* Steps:
* 1) Query Cuda Device and read in NIFTI files
* 2) Run and Time covariate inverse calculation: Sequential and Parallel
* 3) Run and Time nifti Data transpose into point data: Sequential and Parallel
* 4) Run and Time data cleaning: Sequential and Parallel
* 5) Optional: write cleaned data to disc
* 6) Run and Time data normalization: Sequential and Parallel
* 7) Run and Time data connectivity for Seed: Sequential and Parallel - 'seed' = one point in the brain
* 8) Print final runtime statistics
*
***************************************************************************/
/*******************************************
* TODO:
* *)
* *)
* *)
* *)
* *)
*
*
*
*
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include "GPUDeviceQuery.h"
#include "nifti1_Read_Write.h"
//number of times to run the code for runtime stats
#define TIMESTORUN 20
//for testing **** THESE NUMBERS CAN CHANGE FOR TESTING
#define TEST 0 //1 for testing, 0 to turn it off
#define TESTX 2 // dim X
#define TESTY 2 // dim Y
#define TESTZ 2 // dim Z
#define TESTNUMFILES 4 //number of scans taken
#define TESTNUMCOVARIATES 3 //number of covariates to use
#define TESTSEED 4 // Seed is the point in the brain that will be used to calculate connectivity
// with all other points
//FIXED DATA **** DO NOT CHANGE
#define NUMCOVARIATES 15 //changed from 27 - we are now using covariates 0..11, 24, 25, 26
#define NIFTIDIMX 91 //the x, y, z dimensions from the NIFTI files
#define NIFTIDIMY 109
#define NIFTIDIMZ 91
#define NIFTIBASESTRING "nifti1Data/rfMRI_REST1_LR_"
#define NIFTIEXTENSION ".nii"
#define COVARIATESFILE "nifti1Data/covariates.txt"
#define SEED 4 // Seed is the point in the brain that will be used to calculate connectivity
// with all other points
#define ERRORMARGIN .01 //margin of error when comparing result
//this function queries the CUDA device and returns info
extern "C" GPUDetails* queryCUDADevice(int verbose);
//these function reads in the NIFTI files or writes to new NIFTI files
extern "C" int read_nifti_file(char* data_file, float* dataArray, int verbose);
extern "C" int write_nifti_file(char* hdr_file, char* data_file, int berbose);
//Sequential functions - in fMRI_Sequential.c
//for whatever reason, "C" IS required in this file even though it is not used in the fMRI_Sequential.h and .c files
extern "C" int covariatesTranspose(float* covariates, float* covTranspose, int numCovariates, int numFiles);
extern "C" int covariatesTransCovariatesSeq(float* covariates, float* covTranspose, float* covTransXCov, int numCovariates, int numFiles);
extern "C" int covariatesInverse(float* matrixA, float* matrixAInverse, int dim);
extern "C" int transposeNiftiDataSeq(float* originalMatrix, float* transposedMatrix, int iDim, int jDim);
extern "C" int cleanSeq(float* pointData, float* cleanedData, float* covTranspose, float* matrixInverse, int numCovariates, int numFiles, int niftiVolume);
extern "C" int normalizeDataSeq(float* cleanedData, float* normalizedData, int numFiles, int niftiVolume);
extern "C" int connectivitySeq(int seed, float* normalizedData, float* connectivityData, int numFiles, int niftiVolume);
//CUDA Parallel functions - in fMRI_Covariate.cu (Keri)
extern "C" int covariatesTransCovariatesPar(float* covariates, float* covTranspose, float* covTransXCov, int numCovariates, int numFiles, float* runTime);
//CUDA Parallel functions - in fMRI_Clean.cu (Roy)
extern "C" int transposeNiftiDataPar(float* originalMatrix, float* transposedMatrix, int iDim, int jDim, float* runTime);
extern "C" int cleanPar(float* pointData, float* cleanedData, float* covTranspose, float* matrixInverse, int numCovariates, int numFiles, int niftiVolume, float* runTime);
//CUDA Parallel functions - in fMRI_Connectivity.cu (Dan)
extern "C" int normalizeDataPar(float* cleanedData, float* normalizedData, int numFiles, int niftiVolume, float* runTime);
extern "C" int connectivityPar(int seed, float* normalizedData, float* connectivityData, int numFiles, int niftiVolume, float* runTime);
/*
* Error Checking Macro - used to check errors in runtime API code
*
* From stackoverflow.com: The best way to check for errors in
* runtime API code is to define an assert style handler function and wrapper macro.
* Each API call can be wrapped with the gpuErrorCheck macro, which will process
* the return status of the API call it wraps. If there is an error in a call, a
* textual message describing the error and the file and line in your code where the
* error occurred will be emitted to stderr and the application will exit. You could
* conceivably modify gpuAssert to raise an exception rather than call exit() in a
* more sophisticated application if it were required.
*
* A second related question is how to check for errors in kernel launches, which
* can't be directly wrapped in a macro call like standard runtime API calls. For
* kernels, something like this:
*
* kernel<<<1,1>>>(a);
* gpuErrorCheck( hipPeekAtLastError() );
* gpuErrorCheck( hipDeviceSynchronize() );
*
* will firstly check for invalid launch argument, then force the host to wait
* until the kernel stops and checks for an execution error. The synchronisation
* can be eliminated if you have a subsequent blocking API call like this:
*
* kernel<<<1,1>>>(a_d);
* gpuErrorCheck( hipPeekAtLastError() );
* gpuErrorCheck( hipMemcpy(a_h, a_d, size * sizeof(int), hipMemcpyDeviceToHost) );
*
* in which case the hipMemcpy call can return either errors which occurred during
* the kernel execution or those from the memory copy itself. This can be confusing for
* the beginner, and I would recommend using explicit synchronisation after a kernel
* launch during debugging to make it easier to understand where problems might be arising.
*/
//wrap each API call with the gpuErrorCheck macro
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(hipError_t code, const char* file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}//end error checking macro
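/* Minimal usage sketch (illustrative only: this helper is never called by the program, and its
 * name and buffer size are arbitrary). It shows the pattern described in the comment above:
 * wrap each runtime call in gpuErrorCheck, and pair a kernel launch with hipPeekAtLastError()
 * followed by hipDeviceSynchronize(). */
static void gpuErrorCheckUsageSketch()
{
    float* deviceBuffer = NULL;
    gpuErrorCheck( hipMalloc((void**)&deviceBuffer, 128 * sizeof(float)) );
    //hipLaunchKernelGGL(someKernel, dim3(1), dim3(128), 0, 0, deviceBuffer); // then check the launch:
    gpuErrorCheck( hipPeekAtLastError() );
    gpuErrorCheck( hipDeviceSynchronize() );
    gpuErrorCheck( hipFree(deviceBuffer) );
}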
//pre-declare functions
void getNiftiFileName(int i, char* nameString);
int readCovariateFile(float* covariates, int numFiles);
int readNiftiFiles(float* niftiFiles, int numFiles, int fileVolume, int begFileNumber);
int writeNiftiFiles();
void checkInverse(float* covTransXCov, float* matrixInverse, int numCovariates);
void compareTransXCovariates(float* covTransXCovSeq, float* covTransXCovPar, int numCovariates);
void comparePointData(float* pointDataSeq, float* pointDataPar, int numFiles, int niftiVolume);
void compareCleanData(float* cleanedDataSeq, float* cleanedDataPar, int numFiles, int niftiVolume);
void compareNormalizedData(float* normalizedDataSeq, float* normalizedDataPar, int numFiles, int niftiVolume);
void compareConnectivityData(float* connectivityDataSeq, float* connectivityDataPar, int niftiVolume, int seed);
/************************************************************************
* *
* MAIN *
* *
************************************************************************/
int main(int argc, char **argv)
{
int niftiVolume = 0;
int begFileNumber = 0;
int endFileNumber = 0;
int numFiles = 0;
int numCovariates = 0;
int dimX = 0;
int dimY = 0;
int dimZ = 0;
int seed = 0;
if (TEST){
numFiles = TESTNUMFILES;
dimX = TESTX;
dimY = TESTY;
dimZ = TESTZ;
niftiVolume = dimX * dimY *dimZ;
numCovariates = TESTNUMCOVARIATES;
seed = TESTSEED;
}else{
begFileNumber = 11;
endFileNumber = 1200;
numFiles = endFileNumber - begFileNumber + 1;
dimX = NIFTIDIMX;
dimY = NIFTIDIMY;
dimZ = NIFTIDIMZ;
niftiVolume = dimX*dimY*dimZ;
numCovariates = NUMCOVARIATES;
seed = SEED;
}//end if TEST
//data structures for holding timing stats
float seqTimeInverse[TIMESTORUN + 1];
float seqTimePointData[TIMESTORUN + 1];
float seqTimeClean[TIMESTORUN + 1];
float seqTimeNormalize[TIMESTORUN + 1];
float seqTimeConnectivity[TIMESTORUN + 1];
float seqTimeTotal[TIMESTORUN + 1];
//these timings do not count the copy time from CPU to GPU and back
float parTimeInverseNoCopy[TIMESTORUN + 1];
float parTimePointDataNoCopy[TIMESTORUN + 1];
float parTimeCleanNoCopy[TIMESTORUN + 1];
float parTimeNormalizeNoCopy[TIMESTORUN + 1];
float parTimeConnectivityNoCopy[TIMESTORUN + 1];
float parTimeTotalNoCopy[TIMESTORUN + 1];
//Begin TIMES TO RUN
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
printf("STARTING RUN #%d OUT OF %d\n\n", runTime+1, TIMESTORUN);
printf("\n\n#########################################################################################################\n");
printf("# STEP 1: QUERY CUDA DEVICE AND READ IN NIFTI FILES #\n");
printf("#########################################################################################################\n");
GPUDetails* gpuDets = queryCUDADevice(1); //1 = verbose/print
float* covariates = (float*) malloc ( (numFiles * numCovariates ) * sizeof (float) );
float* niftiData = (float*) malloc ( (numFiles * niftiVolume ) * sizeof (float) );
if (TEST){
printf(" *** RUNNING WITH TEST DATA: ***\n");
printf(" niftiVolume = %dx%dx%d = %d, numTimeFiles = %d, numCovariates = %d\n\n", dimX, dimY, dimZ, niftiVolume, numFiles, numCovariates);
//create "dummy" covariate data
for (int i = 0; i < numFiles * TESTNUMCOVARIATES; i++ ){
covariates[i]= (rand()%10 * 1.0); // numbers 0...9
}//end covariate data
//create "dummy nifti data
for (int i = 0; i < numFiles * niftiVolume; i++ ){
niftiData[i] = (rand()%10 * 1.0); // numbers 0...9
}//end niftidata
}
else{
printf(" *** RUNNING WITH NIFTI DATA: ***\n");
printf(" niftiVolume = %dx%dx%d = %d, numTimeFiles = %d, numCovariates = %d\n\n", dimX, dimY, dimZ, niftiVolume, numFiles, numCovariates);
//read in Covariate File:
// This specific covariate file has '27' covariates. There will be a set of
// covariates for each time t: so 27 elements for each of the 1200 - 11 + 1 = 1190 files.
// Matrix has 1190 * 27 = 32130 elements 1190 x 27 matrix
// UPDATE: we will only use covariates 0..11, 24, 25, 26 - so 15 covariates
int errorCov = readCovariateFile(covariates, numFiles);
if (errorCov){exit(1);}
//read NIFTI files
// There are 1190 files, each with 91 * 109 * 91 elements. This will be stored in
// one long array for ease in passing to the GPU
int errorNifti = readNiftiFiles(niftiData, numFiles, niftiVolume, begFileNumber);
if (errorNifti){exit(1);}
}//end if TEST
printf("\n#########################################################################################################\n");
printf("# STEP 2: RUN AND TIME COVARIATE INVERSE CALCULATION: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
// code setup - get the covariates transpose matrix
float* covTranspose = (float*) malloc ( numFiles * numCovariates * sizeof(float) ); //holds Xtrans
int errorTranspose = covariatesTranspose(covariates, covTranspose, numCovariates, numFiles);
if (errorTranspose){exit(1);}
/* SEQUENTIAL CODE - only times covariatesTranspose * covariates */
float* covTransXCovSeq = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)
printf("\n ...RUNNING COVARIATES TRANSPOSE X COVARIATES SEQUENTIAL...\n");
hipEvent_t seq_start_event, seq_stop_event; //begin timing sequential
gpuErrorCheck(hipEventCreate(&seq_start_event));
gpuErrorCheck(hipEventCreate(&seq_stop_event));
gpuErrorCheck(hipEventRecord(seq_start_event, 0));
int errorSeqCovTxCov = covariatesTransCovariatesSeq(covariates, covTranspose, covTransXCovSeq, numCovariates, numFiles);
if (errorSeqCovTxCov){exit(1);}
gpuErrorCheck(hipEventRecord(seq_stop_event, 0));
gpuErrorCheck(hipEventSynchronize(seq_stop_event));
float seq_time_inverse = 0;
gpuErrorCheck(hipEventElapsedTime(&seq_time_inverse, seq_start_event, seq_stop_event));
seq_time_inverse /= 1.0e3f;
seqTimeInverse[runTime] = seq_time_inverse;
/* CUDA CODE - only times covariatesTranspose * covariates */
float* covTransXCovPar = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)
printf("\n ...RUNNING COVARIATES TRANSPOSE X COVARIATES PARALLEL ...\n");
int errorParCovTxCov = covariatesTransCovariatesPar(covariates, covTranspose, covTransXCovPar, numCovariates, numFiles, &parTimeInverseNoCopy[runTime]);
if (errorParCovTxCov){exit(1);}
//more set up - calculate the inverse
float* matrixInverse = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)inverse
int errorInverse = covariatesInverse(covTransXCovSeq, matrixInverse, numCovariates);
if (errorInverse){exit(1);}
checkInverse(covTransXCovSeq, matrixInverse, numCovariates);
/* RESULTS */
printf("\n **** RESULTS COVARIATES INVERSE: ****\n\n");
//compare results
compareTransXCovariates(covTransXCovSeq, covTransXCovPar, numCovariates);
printf("\n SEQ COVARIATES INVERSE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_inverse, 1.0e-3f * (numFiles*numCovariates) / seq_time_inverse, (numFiles*numCovariates) );
printf(" PAR COVARIATES INVERSE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeInverseNoCopy[runTime], 1.0e-3f * (numFiles*numCovariates) / parTimeInverseNoCopy[runTime], (numFiles*numCovariates) );
//speedup
printf("\n **** SPEEDUP COVARIATES INVERSE compared to Sequential: %2f ****\n\n", seq_time_inverse/parTimeInverseNoCopy[runTime]);
//free un-needed data structures
free(covariates);
free(covTransXCovSeq);
free(covTransXCovPar);
printf("\n#########################################################################################################\n");
printf("# STEP 3: RUN AND TIME NIFTI DATA TRANSPOSE INTO POINT DATA: SEQUENTIAL AND PARALLELL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* pointDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) );
printf("\n ...RUNNING POINT DATA SEQUENTIAL...\n");
gpuErrorCheck(hipEventCreate(&seq_start_event));
gpuErrorCheck(hipEventCreate(&seq_stop_event));
gpuErrorCheck(hipEventRecord(seq_start_event, 0));
int errorSeqPointData = transposeNiftiDataSeq(niftiData, pointDataSeq, numFiles, niftiVolume);
if (errorSeqPointData){exit(1);}
gpuErrorCheck(hipEventRecord(seq_stop_event, 0));
gpuErrorCheck(hipEventSynchronize(seq_stop_event));
float seq_time_pointData = 0;
gpuErrorCheck(hipEventElapsedTime(&seq_time_pointData, seq_start_event, seq_stop_event));
seq_time_pointData /= 1.0e3f;
seqTimePointData[runTime] = seq_time_pointData;
/* CUDA CODE */
float* pointDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) );
printf("\n ...RUNNING POINT DATA PARALLEL ...\n");
int errorParPointData = transposeNiftiDataPar(niftiData, pointDataPar, numFiles, niftiVolume, &parTimePointDataNoCopy[runTime]);
if (errorParPointData){exit(1);}
/* RESULTS */
printf("\n **** RESULTS POINT DATA: ****\n\n");
//compare results
comparePointData(pointDataSeq, pointDataPar, numFiles, niftiVolume);
printf("\n SEQ POINT DATA RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_pointData, 1.0e-3f * (numFiles*niftiVolume) / seq_time_pointData, (numFiles*niftiVolume) );
printf(" PAR POINT DATA RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimePointDataNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimePointDataNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP POINT DATA compared to Sequential: %2f ****\n\n", seq_time_pointData/parTimePointDataNoCopy[runTime]);
//free un-needed data structures
free(niftiData);
//free(pointDataSeq); //still needed as input to STEP 4 (cleaning); freed after that step
free(pointDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 4: RUN AND TIME DATA CLEANING: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* cleanedDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds cleaned data values
printf("\n ...RUNNING CLEAN DATA SEQUENTIAL...\n");
gpuErrorCheck(hipEventCreate(&seq_start_event));
gpuErrorCheck(hipEventCreate(&seq_stop_event));
gpuErrorCheck(hipEventRecord(seq_start_event, 0));
int errorSeqClean = cleanSeq(pointDataSeq, cleanedDataSeq, covTranspose, matrixInverse, numCovariates, numFiles, niftiVolume);
if (errorSeqClean){exit(1);}
gpuErrorCheck(hipEventRecord(seq_stop_event, 0));
gpuErrorCheck(hipEventSynchronize(seq_stop_event));
float seq_time_clean = 0;
gpuErrorCheck(hipEventElapsedTime(&seq_time_clean, seq_start_event, seq_stop_event));
seq_time_clean /= 1.0e3f;
seqTimeClean[runTime] = seq_time_clean;
/* CUDA CODE */
float* cleanedDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds cleaned data values
printf("\n ...RUNNING CLEAN DATA PARALLEL ...\n");
int errorParClean = cleanPar(pointDataSeq, cleanedDataPar, covTranspose, matrixInverse, numCovariates, numFiles, niftiVolume, &parTimeCleanNoCopy[runTime]);
if (errorParClean){exit(1);}
/* RESULTS */
printf("\n **** RESULTS CLEAN DATA: ****\n\n");
//compare results
compareCleanData(cleanedDataSeq, cleanedDataPar, numFiles, niftiVolume);
printf("\n SEQ CLEAN RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_clean, 1.0e-3f * (numFiles*niftiVolume) / seq_time_clean, (numFiles*niftiVolume) );
printf(" PAR CLEAN RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeCleanNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimeCleanNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP CLEAN DATA compared to Sequential: %2f ****\n\n", seq_time_clean/parTimeCleanNoCopy[runTime]);
//free un-needed data structures
//free(cleanedDataSeq);
free(cleanedDataPar);
free(pointDataSeq);
free(covTranspose);
free(matrixInverse);
//#########################################################################################################
//# STEP 5: OPTIONAL: WRITE CLEANED DATA TO DISC #
//#########################################################################################################
//first need to transpose back to NIFTI order???
if (!TEST){ //skip this step if testing
printf("\n#########################################################################################################\n");
printf("# STEP 5: WRITE CLEANED DATA TO DISC #\n");
printf("#########################################################################################################\n\n");
int errorWrite = writeNiftiFiles();
if (errorWrite){exit(1);}
printf(" ...finished writing to clean NIFTI files...\n");
}else{//running test data
printf("\n#########################################################################################################\n");
printf("# STEP 5: STEP 5 SKIPPED - USING TEST DATA #\n");
printf("#########################################################################################################\n\n");
}
printf("\n#########################################################################################################\n");
printf("# STEP 6: RUN AND TIME DATA NORMALIZATION: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* normalizedDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds normalized values
float* normalizedDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds normalized values
printf("\n ...RUNNING DATA NORMALIZATION SEQUENTIAL...\n");
gpuErrorCheck(hipEventCreate(&seq_start_event));
gpuErrorCheck(hipEventCreate(&seq_stop_event));
gpuErrorCheck(hipEventRecord(seq_start_event, 0));
int errorNormalizeSeq = normalizeDataSeq(cleanedDataSeq, normalizedDataSeq, numFiles, niftiVolume);
if (errorNormalizeSeq){exit(1);}
gpuErrorCheck(hipEventRecord(seq_stop_event, 0));
gpuErrorCheck(hipEventSynchronize(seq_stop_event));
float seq_time_normalize = 0;
gpuErrorCheck(hipEventElapsedTime(&seq_time_normalize, seq_start_event, seq_stop_event));
seq_time_normalize /= 1.0e3f;
seqTimeNormalize[runTime] = seq_time_normalize;
/* CUDA CODE */
printf("\n ...RUNNING DATA NORMALIZATION PARALLEL ...\n");
int errorNormalizePar = normalizeDataPar(cleanedDataSeq, normalizedDataPar, numFiles, niftiVolume, &parTimeNormalizeNoCopy[runTime]);
if (errorNormalizePar){exit(1);}
/* RESULTS */
printf("\n **** RESULTS DATA NORMALIZATION: ****\n\n");
//compare results
compareNormalizedData(normalizedDataSeq, normalizedDataPar, numFiles, niftiVolume);
printf("\n SEQ NORMALIZE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_normalize, 1.0e-3f * (numFiles*niftiVolume) / seq_time_normalize, (numFiles*niftiVolume) );
printf(" PAR NORMALIZE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeNormalizeNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimeNormalizeNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP NORMALIZE compared to Sequential: %2f ****\n\n", seq_time_normalize/parTimeNormalizeNoCopy[runTime]);
//free un-needed data structures
free(cleanedDataSeq);
//free(normalizedDataSeq);
free(normalizedDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 7: RUN AND TIME DATA CONNECTIVITY FOR SEED: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* connectivityDataSeq = (float*) malloc ( niftiVolume * sizeof(float) ); //holds normalized values
float* connectivityDataPar = (float*) malloc ( niftiVolume * sizeof(float) ); //holds normalized values
printf("\n ...RUNNING CONNECTIVITY SEQUENTIAL FOR SEED = %d...\n", seed);
gpuErrorCheck(hipEventCreate(&seq_start_event));
gpuErrorCheck(hipEventCreate(&seq_stop_event));
gpuErrorCheck(hipEventRecord(seq_start_event, 0));
int errorConnectivitySeq = connectivitySeq(seed, normalizedDataSeq, connectivityDataSeq, numFiles, niftiVolume);
if (errorConnectivitySeq){exit(1);}
gpuErrorCheck(hipEventRecord(seq_stop_event, 0));
gpuErrorCheck(hipEventSynchronize(seq_stop_event));
float seq_time_connectivity = 0;
gpuErrorCheck(hipEventElapsedTime(&seq_time_connectivity, seq_start_event, seq_stop_event));
seq_time_connectivity /= 1.0e3f;
seqTimeConnectivity[runTime] = seq_time_connectivity;
/* CUDA CODE */
printf("\n ...RUNNING CONNECTIVITY PARALLEL FOR SEED = %d...\n", seed);
int errorConnectivityPar = connectivityPar(seed, normalizedDataSeq, connectivityDataPar, numFiles, niftiVolume, &parTimeConnectivityNoCopy[runTime]);
if (errorConnectivityPar){exit(1);}
/* RESULTS */
printf("\n **** RESULTS CONNECTIVITY: ****\n\n");
//compare results
compareConnectivityData(connectivityDataSeq, connectivityDataPar, niftiVolume, seed);
printf("\n SEQ CONNECTIVITY RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_connectivity, 1.0e-3f * (niftiVolume) / seq_time_connectivity, (niftiVolume) );
printf(" PAR CONNECTIVITY RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeConnectivityNoCopy[runTime], 1.0e-3f * (niftiVolume) / parTimeConnectivityNoCopy[runTime], (niftiVolume) );
//speedup
printf("\n **** SPEEDUP CONNECTIVITY compared to Sequential: %2f ****\n\n", seq_time_connectivity/parTimeConnectivityNoCopy[runTime]);
//free un-needed data structures
free(normalizedDataSeq);
free(connectivityDataSeq);
free(connectivityDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 8: PRINT FINAL RUNTIME STATISTICS #\n");
printf("#########################################################################################################\n");
float totalTimeSeq = seq_time_inverse + seq_time_pointData + seq_time_clean + seq_time_normalize + seq_time_connectivity;
float totalTimePar = parTimeInverseNoCopy[runTime] + parTimePointDataNoCopy[runTime] + parTimeCleanNoCopy[runTime] + parTimeNormalizeNoCopy[runTime] + parTimeConnectivityNoCopy[runTime];
seqTimeTotal[runTime] = totalTimeSeq;
parTimeTotalNoCopy[runTime] = totalTimePar;
printf("\n *** FINAL SPEEDUP FOR RUN %d out of %d (compared to Sequential): %4.4f ***\n\n\n\n", runTime, TIMESTORUN, totalTimeSeq/totalTimePar);
}//end times to run
//print out final stats
printf("\n\n\n\n");
printf("\n#########################################################################################################\n");
printf("# FINAL RUNTIME STATISTICS #\n");
printf("#########################################################################################################\n");
printf("INVERSE STATS\n");
seqTimeInverse[TIMESTORUN] = 0.0; //for averages
parTimeInverseNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeInverse[runTime];
float parTime = parTimeInverseNoCopy[runTime];
seqTimeInverse[TIMESTORUN] += seqTime;
parTimeInverseNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Inverse Seq: %.5f Inverse Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nPOINT DATA STATS\n");
seqTimePointData[TIMESTORUN] = 0.0; //for averages
parTimePointDataNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimePointData[runTime];
float parTime = parTimePointDataNoCopy[runTime];
seqTimePointData[TIMESTORUN] += seqTime;
parTimePointDataNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: PointData Seq: %.5f PointData Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nCLEAN STATS\n");
seqTimeClean[TIMESTORUN] = 0.0; //for averages
parTimeCleanNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeClean[runTime];
float parTime = parTimeCleanNoCopy[runTime];
seqTimeClean[TIMESTORUN] += seqTime;
parTimeCleanNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Clean Seq: %.5f Clean Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nNORMALIZE STATS\n");
seqTimeNormalize[TIMESTORUN] = 0.0; //for averages
parTimeNormalizeNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeNormalize[runTime];
float parTime = parTimeNormalizeNoCopy[runTime];
seqTimeNormalize[TIMESTORUN] += seqTime;
parTimeNormalizeNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Normalize Seq: %.5f Normalize Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nCONNECTIVITY STATS\n");
seqTimeConnectivity[TIMESTORUN] = 0.0; //for averages
parTimeConnectivityNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeConnectivity[runTime];
float parTime = parTimeConnectivityNoCopy[runTime];
seqTimeConnectivity[TIMESTORUN] += seqTime;
parTimeConnectivityNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Connectivity Seq: %.5f Connectivity Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nTOTAL TIME STATS\n");
seqTimeTotal[TIMESTORUN] = 0.0; //for averages
parTimeTotalNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeTotal[runTime];
float parTime = parTimeTotalNoCopy[runTime];
seqTimeTotal[TIMESTORUN] += seqTime;
parTimeTotalNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Total Time Seq: %.5f Total Time Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("***** AVERAGES *****\n\n");
float aveSeqInverse = seqTimeInverse[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParInverse = parTimeInverseNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveInvSpeedup = aveSeqInverse/aveParInverse;
printf("INVERSE AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqInverse, aveParInverse, aveInvSpeedup);
float aveSeqPointData = seqTimePointData[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParPointData = parTimePointDataNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float avePDSpeedup = aveSeqPointData/aveParPointData;
printf("POINT DATA AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqPointData, aveParPointData, avePDSpeedup);
float aveSeqClean = seqTimeClean[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParClean = parTimeCleanNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveCleanSpeedup = aveSeqClean/aveParClean;
printf("CLEAN AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqClean, aveParClean, aveCleanSpeedup);
float aveSeqNorm = seqTimeNormalize[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParNorm = parTimeNormalizeNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveNormSpeedup = aveSeqNorm/aveParNorm;
printf("NORMALIZE AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqNorm, aveParNorm, aveNormSpeedup);
float aveSeqConn = seqTimeConnectivity[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParConn = parTimeConnectivityNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveConnSpeedup = aveSeqConn/aveParConn;
printf("CONNECTIVITY AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqConn, aveParConn, aveConnSpeedup);
float aveSeqTotal = seqTimeTotal[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParTotal = parTimeTotalNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveTotalSpeedup = aveSeqTotal/aveParTotal;
printf("TOTAL AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqTotal, aveParTotal, aveTotalSpeedup);
}// end main
/************************************************************************
* *
* READ / WRITE NIFTI FILES *
* *
************************************************************************/
/***
* Converts an integer to a string
*
* Specifically, we need the string in the form
* of a 5 character long number: such as "00011"
* to create a final string of
*
* "nifti1Data/rfMRI_REST1_LR_00011.nii"
*
*
*/
void getNiftiFileName(int i, char* nameString)
{
char const digit[] = "0123456789";
char temp[6];
char* tempString;
temp[5] = '\0'; //null terminate
//walk backwards through the integer loading the string
for (int j = 4; j >=0; j--){
temp[j] = digit[i%10];
i = i /10;
}
tempString = temp;
strcpy(nameString, NIFTIBASESTRING);
strcat(nameString, tempString);
strcat(nameString, NIFTIEXTENSION);
}//end getNiftiFileName
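/*
 * Example, based on the naming scheme documented above (assumes the caller's
 * buffer is large enough for NIFTIBASESTRING + 5 digits + NIFTIEXTENSION):
 *
 *     char name[80];
 *     getNiftiFileName(11, name); // name == "nifti1Data/rfMRI_REST1_LR_00011.nii"
 */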
/***
* Reads in the file of covariates.
*/
int readCovariateFile(float* covariates, int numFiles)
{
printf(" ...READING COVARIATES FILE....\n\n");
FILE* fp;
char oneWord[50];
int c; //holds fscanf return value for the EOF check
int iCov = 0;
//temporary data structure to hold 27 covariates
int numCovariateElements = 27 * numFiles;
float* temp = (float*) calloc ( numCovariateElements, sizeof (float) ); //calloc initializes bits to zero
fp = fopen(COVARIATESFILE, "r"); //open in "read" mode
if (fp == NULL){
printf("Covariates File Read Error: %s Program Abort.\n", COVARIATESFILE);
return(1); //error
}//end if
c = fscanf(fp, "%s", oneWord);
while(c!= EOF) /* repeat until EOF */
{
if (iCov >= numCovariateElements){
printf("Error Reading Covariates File: number of elements: %d, number expected: %d. Program Abort.\n", iCov, numCovariateElements);
return(1);//error
}//end if
temp[iCov] = atof(oneWord); /* convert to float and store */
iCov++;
c = fscanf(fp,"%s",oneWord); /* get next word from the file */
}//end while
fclose(fp);
if (iCov != numCovariateElements){
printf("Error Reading Covariates File: Expected %d elements, but read %d elements. Program Abort.\n", numCovariateElements, iCov);
return(1);
}
// at this point, we really only want to keep covariates 0..11, 24, 25, 26 out of the original 0 .. 26
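// Illustrative mapping, as the loops below index it (row = covariate, column = time file):
// rows 0..11 are copied unchanged, and original covariates 24, 25, 26 become rows 12, 13, 14
// of the reduced 15 x numFiles matrix, e.g. covariates[12*numFiles + j] = temp[24*numFiles + j].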
for (int i = 0; i < 12*numFiles; i++){
covariates[i] = temp[i];
}// end for i
for (int i = 24; i < 27; i++){
for (int j = 0; j < numFiles; j++){
covariates[(i-12)*numFiles + j] = temp[i*numFiles + j];
}
}
free(temp);
return 0; //success
}//end readCovariateFile
/***
 * Reads in the data from the 1190 NIFTI files
*/
int readNiftiFiles(float* niftiFiles, int numFiles, int fileVolume, int begFileNumber)
{
printf(" ...READING NIFTI FILES....\n");
char* niftiFileName = (char *) malloc ( 80 * sizeof(char));
int errorCount = 0;
// read in one file at a time
for (int i = 0; i < numFiles; i++){
//get the file name associated with this number
int fileNumber = i + begFileNumber;
getNiftiFileName(fileNumber, niftiFileName);
//read in the file to the appropriate place in the data structure
int error = read_nifti_file(niftiFileName, niftiFiles + (i*fileVolume), 0); //verbose flag: 0 = quiet, 1 = verbose
if (error){
errorCount++;
printf("File Error: %s\n", niftiFileName);
}
}//end for i
free(niftiFileName);
if (errorCount == 0){
//printf(" Finished READING NIFTI files.\n\n");
return 0; //success
}else{
printf(" Finished reading NIFTI files. %d files had read errors. Program Abort.\n\n", errorCount);
return(1); //error
}
}//end readNiftiFiles
/***
 * Writes cleaned data to new 1190 NIFTI files
*/
int writeNiftiFiles()
{
printf(" ...WRITING NIFTI FILES....\n\n");
return 0; //success
}//end writeNiftiFiles
/************************************************************************
* *
* HELPER FUNCTIONS FOR TESTING *
* *
************************************************************************/
void checkInverse(float* covTransXCov, float* matrixInverse, int numCovariates)
{
int i, j, k;
for (i = 0; i < numCovariates; i++){
for (j = 0; j < numCovariates; j++){
float temp = 0.0;
for (k = 0; k < numCovariates; k++)
temp += covTransXCov[i*numCovariates+k] * matrixInverse[k*numCovariates+j];
//test the result
temp = fabs(temp);
float desiredValue = 0.0;
if (i == j)
desiredValue = 1.0;
if ( fabs(desiredValue - temp) > ERRORMARGIN ){
printf(" ERROR: matrix inverse test (identity matrix) not valid at [%d][%d]. Value: %5.7f, should be: %5.7f\n", i, j, temp, desiredValue);
return;
}//end if
}//end for j
}//end for i
//if we get here it's all good
printf(" Matrix inverse valid for Sequential Version\n");
}//end checkInverse
void compareTransXCovariates(float* covTransXCovSeq, float* covTransXCovPar, int numCovariates)
{
for (int i = 0; i < numCovariates; i++){
for (int j = 0; j < numCovariates; j++){
int currentElement = i*numCovariates + j;
float seqElement = covTransXCovSeq[currentElement];
float parElement = covTransXCovPar[currentElement];
if ( fabs(seqElement - parElement) > ERRORMARGIN){
printf(" INVALID!!!!! Transpose X Covariates not equal at [%d][%d]: Should be: %.5f, Actual: %.5f\n", i, j, seqElement, parElement);
return;
}//end if
}//end for j
}//end for i
printf(" TRANSPOSE X COVARIATES VALID!\n");
}//end compareTransXCovariates
void comparePointData(float* pointDataSeq, float* pointDataPar, int numFiles, int niftiVolume)
{
for (int i = 0; i < niftiVolume; i++){
for (int j = 0; j < numFiles; j++){
int currentElement = i*numFiles + j;
if ( fabs(pointDataSeq[currentElement] - pointDataPar[currentElement]) > ERRORMARGIN){
printf(" INVALID!!!!! Point Data Matrix not equal at [%d][%d]\n", i, j);
return;
}//end if !=
}//end for j
}//end for i
// if we get here, data is correct
printf(" POINT DATA MATRIX VALID!\n");
}//end comparePointData
void compareCleanData(float* cleanedDataSeq, float* cleanedDataPar, int numFiles, int niftiVolume)
{
int i, j;
for (i = 0; i < niftiVolume; i++){
for (j = 0; j < numFiles; j++){
int currentElement = i*numFiles + j;
float seqVal = cleanedDataSeq[currentElement];
float parVal = cleanedDataPar[currentElement];
if ( fabs(seqVal - parVal) > ERRORMARGIN){
printf(" INVALID!!!!! Clean Data not equal at [%d][%d]: Should be: %.5f, actual: %.5f\n", i, j, seqVal, parVal );
return;
}//end if
}//end for j
}//end for i
printf(" CLEAN DATA VALID!\n");
}//end compareCleanData
/***
* Incoming data will be in point-vector form:
*
* rows = niftiVolume;
* cols = numFiles;
*
*/
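// In this layout the value for voxel v at time file t sits at index v*numFiles + t,
// which is the currentElement arithmetic used below.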
void compareNormalizedData(float* normalizedDataSeq, float* normalizedDataPar, int numFiles, int niftiVolume)
{
int i, j;
for (i = 0; i < niftiVolume; i++){
int currentRowStartElement = i * numFiles;
for (j = 0; j < numFiles; j++){
int currentElement = currentRowStartElement + j;
float normSeq = normalizedDataSeq[currentElement];
float normPar = normalizedDataPar[currentElement];
if (fabs(normSeq - normPar) > ERRORMARGIN){
printf(" INVALID!!!!! Normalized Matrix not equal at [%d][%d]: Should be: %.5f, actual: %.5f\n", i, j, normSeq, normPar);
return;
}//end if
}//end for j
}//end for i
printf(" NORMALIZED MATRIX VALID!\n");
}//end compareNormalizedData
void compareConnectivityData(float* connectivityDataSeq, float* connectivityDataPar, int niftiVolume, int seed)
{
int i;
for (i = 0; i < niftiVolume; i++){
float conSeq = connectivityDataSeq[i];
float conPar = connectivityDataPar[i];
if ( fabs(conSeq - conPar) > ERRORMARGIN){
printf(" INVALID!!!!! Connectivity Vector not equal at [%d] for seed %d: Should be: %.5f, actual: %.5f\n", i, seed, conSeq, conPar);
return;
}//end if
}//end for i
printf(" CONNECTIVITY VECTOR VALID FOR SEED %d!\n", seed);
}//end compareConnectivitydata
| 3a53257c957b6ae69ab08f3c3004a64fbd27b906.cu | /****************************************************************************
* Roy Wong
* Dan Rolfe
* Keri Anderson
*
* CS6235 CUDA Final Project
* Due April 2014
*
*
* To Run: ./connectivity
* To Compile: use "make" command (use makefile provided)
* Use gradlabX.eng.utah.edu, where X can be 1, 2, 3, 4, .... 13
* (not all extensions have CUDA installed)
*
*
*
* EMACS notes:
* To get syntax highlighting in Emacs: alt-x c-mode
* To get line numbers: alt-x global-linum-mode
*
* Needed files:
*
*
* PROGRAM DESCRIPTION:
* This program reads in NIFTI data, "cleans" the noise from it, and
* calculates how "connected" each point in the brain is to every other
* point in the brain.
*
* Steps:
* 1) Query Cuda Device and read in NIFTI files
* 2) Run and Time covariate inverse calculation: Sequential and Parallel
 * 3) Run and Time nifti Data transpose into point data: Sequential and Parallel
* 4) Run and Time data cleaning: Sequential and Parallel
* 5) Optional: write cleaned data to disc
* 6) Run and Time data normalization: Sequential and Parallel
* 7) Run and Time data connectivity for Seed: Sequential and Parallel - 'seed' = one point in the brain
 * 8) Print final runtime statistics
*
***************************************************************************/
/*******************************************
* TODO:
* *)
* *)
* *)
* *)
* *)
*
*
*
*
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <string.h>
#include "GPUDeviceQuery.h"
#include "nifti1_Read_Write.h"
//number of times to run the code for runtime stats
#define TIMESTORUN 20
//for testing **** THESE NUMBERS CAN CHANGE FOR TESTING
#define TEST 0 //1 for testing, 0 to turn it off
#define TESTX 2 // dim X
#define TESTY 2 // dim Y
#define TESTZ 2 // dim Z
#define TESTNUMFILES 4 //number of scans taken
#define TESTNUMCOVARIATES 3 //number of covariates to use
#define TESTSEED 4 // Seed is the point in the brain that will be used to calculate connectivity
// with all other points
//FIXED DATA **** DO NOT CHANGE
#define NUMCOVARIATES 15 //changed from 27 - we are now using covariates 0..11, 24, 25, 26
#define NIFTIDIMX 91 //the x, y, z dimensions from the NIFTI files
#define NIFTIDIMY 109
#define NIFTIDIMZ 91
#define NIFTIBASESTRING "nifti1Data/rfMRI_REST1_LR_"
#define NIFTIEXTENSION ".nii"
#define COVARIATESFILE "nifti1Data/covariates.txt"
#define SEED 4 // Seed is the point in the brain that will be used to calculate connectivity
// with all other points
#define ERRORMARGIN .01 //margin of error when comparing result
//this function queries the CUDA device and returns info
extern "C" GPUDetails* queryCUDADevice(int verbose);
//these function reads in the NIFTI files or writes to new NIFTI files
extern "C" int read_nifti_file(char* data_file, float* dataArray, int verbose);
extern "C" int write_nifti_file(char* hdr_file, char* data_file, int berbose);
//Sequential functions - in fMRI_Sequential.c
//for whatever reason, "C" IS required in this file even though it is not used in the fMRI_Sequential.h and .c files
extern "C" int covariatesTranspose(float* covariates, float* covTranspose, int numCovariates, int numFiles);
extern "C" int covariatesTransCovariatesSeq(float* covariates, float* covTranspose, float* covTransXCov, int numCovariates, int numFiles);
extern "C" int covariatesInverse(float* matrixA, float* matrixAInverse, int dim);
extern "C" int transposeNiftiDataSeq(float* originalMatrix, float* transposedMatrix, int iDim, int jDim);
extern "C" int cleanSeq(float* pointData, float* cleanedData, float* covTranspose, float* matrixInverse, int numCovariates, int numFiles, int niftiVolume);
extern "C" int normalizeDataSeq(float* cleanedData, float* normalizedData, int numFiles, int niftiVolume);
extern "C" int connectivitySeq(int seed, float* normalizedData, float* connectivityData, int numFiles, int niftiVolume);
//CUDA Parallel functions - in fMRI_Covariate.cu (Keri)
extern "C" int covariatesTransCovariatesPar(float* covariates, float* covTranspose, float* covTransXCov, int numCovariates, int numFiles, float* runTime);
//CUDA Parallel functions - in fMRI_Clean.cu (Roy)
extern "C" int transposeNiftiDataPar(float* originalMatrix, float* transposedMatrix, int iDim, int jDim, float* runTime);
extern "C" int cleanPar(float* pointData, float* cleanedData, float* covTranspose, float* matrixInverse, int numCovariates, int numFiles, int niftiVolume, float* runTime);
//CUDA Parallel functions - in fMRI_Connectivity.cu (Dan)
extern "C" int normalizeDataPar(float* cleanedData, float* normalizedData, int numFiles, int niftiVolume, float* runTime);
extern "C" int connectivityPar(int seed, float* normalizedData, float* connectivityData, int numFiles, int niftiVolume, float* runTime);
/*
* Error Checking Macro - used to check errors in runtime API code
*
* From stackoverflow.com: The best way to check for errors in
* runtime API code is to define an assert style handler function and wrapper macro.
 * Each API call can be wrapped with the gpuErrorCheck macro, which will process
* the return status of the API call it wraps. If there is an error in a call, a
* textual message describing the error and the file and line in your code where the
* error occurred will be emitted to stderr and the application will exit. You could
* conceivably modify gpuAssert to raise an exception rather than call exit() in a
* more sophisticated application if it were required.
*
* A second related question is how to check for errors in kernel launches, which
* can't be directly wrapped in a macro call like standard runtime API calls. For
* kernels, something like this:
*
* kernel<<<1,1>>>(a);
* gpuErrorcheck( cudaPeekAtLastError() );
* gpuErrorcheck( cudaDeviceSynchronize() );
*
* will firstly check for invalid launch argument, then force the host to wait
* until the kernel stops and checks for an execution error. The synchronisation
* can be eliminated if you have a subsequent blocking API call like this:
*
* kernel<<<1,1>>>(a_d);
* gpuErrorcheck( cudaPeekAtLastError() );
* gpuErrorcheck( cudaMemcpy(a_h, a_d, size * sizeof(int), cudaMemcpyDeviceToHost) );
*
* in which case the cudaMemcpy call can return either errors which occurred during
* the kernel execution or those from the memory copy itself. This can be confusing for
* the beginner, and I would recommend using explicit synchronisation after a kernel
* launch during debugging to make it easier to understand where problems might be arising.
*/
//wrap each API call with the gpuErrorCheck macro
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}//end error checking macro
//pre-declare functions
void getNiftiFileName(int i, char* nameString);
int readCovariateFile(float* covariates, int numFiles);
int readNiftiFiles(float* niftiFiles, int numFiles, int fileVolume, int begFileNumber);
int writeNiftiFiles();
void checkInverse(float* covTransXCov, float* matrixInverse, int numCovariates);
void compareTransXCovariates(float* covTransXCovSeq, float* covTransXCovPar, int numCovariates);
void comparePointData(float* pointDataSeq, float* pointDataPar, int numFiles, int niftiVolume);
void compareCleanData(float* cleanedDataSeq, float* cleanedDataPar, int numFiles, int niftiVolume);
void compareNormalizedData(float* normalizedDataSeq, float* normalizedDataPar, int numFiles, int niftiVolume);
void compareConnectivityData(float* connectivityDataSeq, float* connectivityDataPar, int niftiVolume, int seed);
/************************************************************************
* *
* MAIN *
* *
************************************************************************/
int main(int argc, char **argv)
{
int niftiVolume = 0;
int begFileNumber = 0;
int endFileNumber = 0;
int numFiles = 0;
int numCovariates = 0;
int dimX = 0;
int dimY = 0;
int dimZ = 0;
int seed = 0;
if (TEST){
numFiles = TESTNUMFILES;
dimX = TESTX;
dimY = TESTY;
dimZ = TESTZ;
niftiVolume = dimX * dimY *dimZ;
numCovariates = TESTNUMCOVARIATES;
seed = TESTSEED;
}else{
begFileNumber = 11;
endFileNumber = 1200;
numFiles = endFileNumber - begFileNumber + 1;
dimX = NIFTIDIMX;
dimY = NIFTIDIMY;
dimZ = NIFTIDIMZ;
niftiVolume = dimX*dimY*dimZ;
numCovariates = NUMCOVARIATES;
seed = SEED;
}//end if TEST
//data structures for holding timing stats
float seqTimeInverse[TIMESTORUN + 1];
float seqTimePointData[TIMESTORUN + 1];
float seqTimeClean[TIMESTORUN + 1];
float seqTimeNormalize[TIMESTORUN + 1];
float seqTimeConnectivity[TIMESTORUN + 1];
float seqTimeTotal[TIMESTORUN + 1];
//these timings do not count the copy time from CPU to GPU and back
float parTimeInverseNoCopy[TIMESTORUN + 1];
float parTimePointDataNoCopy[TIMESTORUN + 1];
float parTimeCleanNoCopy[TIMESTORUN + 1];
float parTimeNormalizeNoCopy[TIMESTORUN + 1];
float parTimeConnectivityNoCopy[TIMESTORUN + 1];
float parTimeTotalNoCopy[TIMESTORUN + 1];
//Begin TIMES TO RUN
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
printf("STARTING RUN #%d OUT OF %d\n\n", runTime+1, TIMESTORUN);
printf("\n\n#########################################################################################################\n");
printf("# STEP 1: QUERY CUDA DEVICE AND READ IN NIFTI FILES #\n");
printf("#########################################################################################################\n");
GPUDetails* gpuDets = queryCUDADevice(1); //1 = verbose/print
float* covariates = (float*) malloc ( (numFiles * numCovariates ) * sizeof (float) );
float* niftiData = (float*) malloc ( (numFiles * niftiVolume ) * sizeof (float) );
if (TEST){
printf(" *** RUNNING WITH TEST DATA: ***\n");
printf(" niftiVolume = %dx%dx%d = %d, numTimeFiles = %d, numCovariates = %d\n\n", dimX, dimY, dimZ, niftiVolume, numFiles, numCovariates);
//create "dummy" covariate data
for (int i = 0; i < numFiles * TESTNUMCOVARIATES; i++ ){
covariates[i]= (rand()%10 * 1.0); // numbers 0...9
}//end covariate data
//create "dummy nifti data
for (int i = 0; i < numFiles * niftiVolume; i++ ){
niftiData[i] = (rand()%10 * 1.0); // numbers 0...9
}//end niftidata
}
else{
printf(" *** RUNNING WITH NIFTI DATA: ***\n");
printf(" niftiVolume = %dx%dx%d = %d, numTimeFiles = %d, numCovariates = %d\n\n", dimX, dimY, dimZ, niftiVolume, numFiles, numCovariates);
//read in Covariate File:
// This specific covariate file has '27' covariates. There will be a set of
// covariates for each time t: so 27 elements for each of the 1200 - 11 + 1 = 1190 files.
// Matrix has 1190 * 27 = 32130 elements 1190 x 27 matrix
// UPDATE: we will only use covariates 0..11, 24, 25, 26 - so 15 covariates
int errorCov = readCovariateFile(covariates, numFiles);
if (errorCov){exit(1);}
//read NIFTI files
// There are 1190 files, each with 91 * 109 * 91 elements. This will be stored in
// one long array for ease in passing to the GPU
int errorNifti = readNiftiFiles(niftiData, numFiles, niftiVolume, begFileNumber);
if (errorNifti){exit(1);}
}//end if TEST
printf("\n#########################################################################################################\n");
printf("# STEP 2: RUN AND TIME COVARIATE INVERSE CALCULATION: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
// code setup - get the covariates transpose matrix
float* covTranspose = (float*) malloc ( numFiles * numCovariates * sizeof(float) ); //holds Xtrans
int errorTranspose = covariatesTranspose(covariates, covTranspose, numCovariates, numFiles);
if (errorTranspose){exit(1);}
/* SEQUENTIAL CODE - only times covariatesTranspose * covariates */
float* covTransXCovSeq = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)
printf("\n ...RUNNING COVARIATES TRANSPOSE X COVARIATES SEQUENTIAL...\n");
cudaEvent_t seq_start_event, seq_stop_event; //begin timing sequential
gpuErrorCheck(cudaEventCreate(&seq_start_event));
gpuErrorCheck(cudaEventCreate(&seq_stop_event));
gpuErrorCheck(cudaEventRecord(seq_start_event, 0));
int errorSeqCovTxCov = covariatesTransCovariatesSeq(covariates, covTranspose, covTransXCovSeq, numCovariates, numFiles);
if (errorSeqCovTxCov){exit(1);}
gpuErrorCheck(cudaEventRecord(seq_stop_event, 0));
gpuErrorCheck(cudaEventSynchronize(seq_stop_event));
float seq_time_inverse = 0;
gpuErrorCheck(cudaEventElapsedTime(&seq_time_inverse, seq_start_event, seq_stop_event));
seq_time_inverse /= 1.0e3f;
seqTimeInverse[runTime] = seq_time_inverse;
/* CUDA CODE - only times covariatesTranspose * covariates */
float* covTransXCovPar = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)
printf("\n ...RUNNING COVARIATES TRANSPOSE X COVARIATES PARALLEL ...\n");
int errorParCovTxCov = covariatesTransCovariatesPar(covariates, covTranspose, covTransXCovPar, numCovariates, numFiles, &parTimeInverseNoCopy[runTime]);
if (errorParCovTxCov){exit(1);}
//more set up - calculate the inverse
float* matrixInverse = (float*) malloc ( numCovariates * numCovariates * sizeof(float) ); //holds (Xtrans * X)inverse
int errorInverse = covariatesInverse(covTransXCovSeq, matrixInverse, numCovariates);
if (errorInverse){exit(1);}
checkInverse(covTransXCovSeq, matrixInverse, numCovariates);
/* RESULTS */
printf("\n **** RESULTS COVARIATES INVERSE: ****\n\n");
//compare results
compareTransXCovariates(covTransXCovSeq, covTransXCovPar, numCovariates);
printf("\n SEQ COVARIATES INVERSE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_inverse, 1.0e-3f * (numFiles*numCovariates) / seq_time_inverse, (numFiles*numCovariates) );
printf(" PAR COVARIATES INVERSE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeInverseNoCopy[runTime], 1.0e-3f * (numFiles*numCovariates) / parTimeInverseNoCopy[runTime], (numFiles*numCovariates) );
//speedup
printf("\n **** SPEEDUP COVARIATES INVERSE compared to Sequential: %2f ****\n\n", seq_time_inverse/parTimeInverseNoCopy[runTime]);
//free un-needed data structures
free(covariates);
free(covTransXCovSeq);
free(covTransXCovPar);
printf("\n#########################################################################################################\n");
printf("# STEP 3: RUN AND TIME NIFTI DATA TRANSPOSE INTO POINT DATA: SEQUENTIAL AND PARALLELL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* pointDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) );
printf("\n ...RUNNING POINT DATA SEQUENTIAL...\n");
gpuErrorCheck(cudaEventCreate(&seq_start_event));
gpuErrorCheck(cudaEventCreate(&seq_stop_event));
gpuErrorCheck(cudaEventRecord(seq_start_event, 0));
int errorSeqPointData = transposeNiftiDataSeq(niftiData, pointDataSeq, numFiles, niftiVolume);
if (errorSeqPointData){exit(1);}
gpuErrorCheck(cudaEventRecord(seq_stop_event, 0));
gpuErrorCheck(cudaEventSynchronize(seq_stop_event));
float seq_time_pointData = 0;
gpuErrorCheck(cudaEventElapsedTime(&seq_time_pointData, seq_start_event, seq_stop_event));
seq_time_pointData /= 1.0e3f;
seqTimePointData[runTime] = seq_time_pointData;
/* CUDA CODE */
float* pointDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) );
printf("\n ...RUNNING POINT DATA PARALLEL ...\n");
int errorParPointData = transposeNiftiDataPar(niftiData, pointDataPar, numFiles, niftiVolume, &parTimePointDataNoCopy[runTime]);
if (errorParPointData){exit(1);}
/* RESULTS */
printf("\n **** RESULTS POINT DATA: ****\n\n");
//compare results
comparePointData(pointDataSeq, pointDataPar, numFiles, niftiVolume);
printf("\n SEQ POINT DATA RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_pointData, 1.0e-3f * (numFiles*niftiVolume) / seq_time_pointData, (numFiles*niftiVolume) );
printf(" PAR POINT DATA RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimePointDataNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimePointDataNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP POINT DATA compared to Sequential: %2f ****\n\n", seq_time_pointData/parTimePointDataNoCopy[runTime]);
//free un-needed data structures
free(niftiData);
//free(pointDataSeq); //still needed as input to STEP 4 (cleaning); freed after that step
free(pointDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 4: RUN AND TIME DATA CLEANING: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* cleanedDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds cleaned data values
printf("\n ...RUNNING CLEAN DATA SEQUENTIAL...\n");
gpuErrorCheck(cudaEventCreate(&seq_start_event));
gpuErrorCheck(cudaEventCreate(&seq_stop_event));
gpuErrorCheck(cudaEventRecord(seq_start_event, 0));
int errorSeqClean = cleanSeq(pointDataSeq, cleanedDataSeq, covTranspose, matrixInverse, numCovariates, numFiles, niftiVolume);
if (errorSeqClean){exit(1);}
gpuErrorCheck(cudaEventRecord(seq_stop_event, 0));
gpuErrorCheck(cudaEventSynchronize(seq_stop_event));
float seq_time_clean = 0;
gpuErrorCheck(cudaEventElapsedTime(&seq_time_clean, seq_start_event, seq_stop_event));
seq_time_clean /= 1.0e3f;
seqTimeClean[runTime] = seq_time_clean;
/* CUDA CODE */
float* cleanedDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds cleaned data values
printf("\n ...RUNNING CLEAN DATA PARALLEL ...\n");
int errorParClean = cleanPar(pointDataSeq, cleanedDataPar, covTranspose, matrixInverse, numCovariates, numFiles, niftiVolume, &parTimeCleanNoCopy[runTime]);
if (errorParClean){exit(1);}
/* RESULTS */
printf("\n **** RESULTS CLEAN DATA: ****\n\n");
//compare results
compareCleanData(cleanedDataSeq, cleanedDataPar, numFiles, niftiVolume);
printf("\n SEQ CLEAN RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_clean, 1.0e-3f * (numFiles*niftiVolume) / seq_time_clean, (numFiles*niftiVolume) );
printf(" PAR CLEAN RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeCleanNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimeCleanNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP CLEAN DATA compared to Sequential: %2f ****\n\n", seq_time_clean/parTimeCleanNoCopy[runTime]);
//free un-needed data structures
//free(cleanedDataSeq);
free(cleanedDataPar);
free(pointDataSeq);
free(covTranspose);
free(matrixInverse);
//#########################################################################################################
//# STEP 5: OPTIONAL: WRITE CLEANED DATA TO DISC #
//#########################################################################################################
//first need to transpose back to NIFTI order???
if (!TEST){ //skip this step if testing
printf("\n#########################################################################################################\n");
printf("# STEP 5: WRITE CLEANED DATA TO DISC #\n");
printf("#########################################################################################################\n\n");
int errorWrite = writeNiftiFiles();
if (errorWrite){exit(1);}
printf(" ...finished writing to clean NIFTI files...\n");
}else{//running test data
printf("\n#########################################################################################################\n");
printf("# STEP 5: STEP 5 SKIPPED - USING TEST DATA #\n");
printf("#########################################################################################################\n\n");
}
printf("\n#########################################################################################################\n");
printf("# STEP 6: RUN AND TIME DATA NORMALIZATION: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* normalizedDataSeq = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds normalized values
float* normalizedDataPar = (float*) malloc ( numFiles * niftiVolume * sizeof(float) ); //holds normalized values
printf("\n ...RUNNING DATA NORMALIZATION SEQUENTIAL...\n");
gpuErrorCheck(cudaEventCreate(&seq_start_event));
gpuErrorCheck(cudaEventCreate(&seq_stop_event));
gpuErrorCheck(cudaEventRecord(seq_start_event, 0));
int errorNormalizeSeq = normalizeDataSeq(cleanedDataSeq, normalizedDataSeq, numFiles, niftiVolume);
if (errorNormalizeSeq){exit(1);}
gpuErrorCheck(cudaEventRecord(seq_stop_event, 0));
gpuErrorCheck(cudaEventSynchronize(seq_stop_event));
float seq_time_normalize = 0;
gpuErrorCheck(cudaEventElapsedTime(&seq_time_normalize, seq_start_event, seq_stop_event));
seq_time_normalize /= 1.0e3f;
seqTimeNormalize[runTime] = seq_time_normalize;
/* CUDA CODE */
printf("\n ...RUNNING DATA NORMALIZATION PARALLEL ...\n");
int errorNormalizePar = normalizeDataPar(cleanedDataSeq, normalizedDataPar, numFiles, niftiVolume, &parTimeNormalizeNoCopy[runTime]);
if (errorNormalizePar){exit(1);}
/* RESULTS */
printf("\n **** RESULTS DATA NORMALIZATION: ****\n\n");
//compare results
compareNormalizedData(normalizedDataSeq, normalizedDataPar, numFiles, niftiVolume);
printf("\n SEQ NORMALIZE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_normalize, 1.0e-3f * (numFiles*niftiVolume) / seq_time_normalize, (numFiles*niftiVolume) );
printf(" PAR NORMALIZE RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeNormalizeNoCopy[runTime], 1.0e-3f * (numFiles*niftiVolume) / parTimeNormalizeNoCopy[runTime], (numFiles*niftiVolume) );
//speedup
printf("\n **** SPEEDUP NORMALIZE compared to Sequential: %2f ****\n\n", seq_time_normalize/parTimeNormalizeNoCopy[runTime]);
//free un-needed data structures
free(cleanedDataSeq);
//free(normalizedDataSeq);
free(normalizedDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 7: RUN AND TIME DATA CONNECTIVITY FOR SEED: SEQUENTIAL AND PARALLEL #\n");
printf("#########################################################################################################\n");
/* SEQUENTIAL CODE */
float* connectivityDataSeq = (float*) malloc ( niftiVolume * sizeof(float) ); //holds normalized values
float* connectivityDataPar = (float*) malloc ( niftiVolume * sizeof(float) ); //holds normalized values
printf("\n ...RUNNING CONNECTIVITY SEQUENTIAL FOR SEED = %d...\n", seed);
gpuErrorCheck(cudaEventCreate(&seq_start_event));
gpuErrorCheck(cudaEventCreate(&seq_stop_event));
gpuErrorCheck(cudaEventRecord(seq_start_event, 0));
int errorConnectivitySeq = connectivitySeq(seed, normalizedDataSeq, connectivityDataSeq, numFiles, niftiVolume);
if (errorConnectivitySeq){exit(1);}
gpuErrorCheck(cudaEventRecord(seq_stop_event, 0));
gpuErrorCheck(cudaEventSynchronize(seq_stop_event));
float seq_time_connectivity = 0;
gpuErrorCheck(cudaEventElapsedTime(&seq_time_connectivity, seq_start_event, seq_stop_event));
seq_time_connectivity /= 1.0e3f;
seqTimeConnectivity[runTime] = seq_time_connectivity;
/* CUDA CODE */
printf("\n ...RUNNING CONNECTIVITY PARALLEL FOR SEED = %d...\n", seed);
int errorConnectivityPar = connectivityPar(seed, normalizedDataSeq, connectivityDataPar, numFiles, niftiVolume, &parTimeConnectivityNoCopy[runTime]);
if (errorConnectivityPar){exit(1);}
/* RESULTS */
printf("\n **** RESULTS CONNECTIVITY: ****\n\n");
//compare results
compareConnectivityData(connectivityDataSeq, connectivityDataPar, niftiVolume, seed);
printf("\n SEQ CONNECTIVITY RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n",
seq_time_connectivity, 1.0e-3f * (niftiVolume) / seq_time_connectivity, (niftiVolume) );
printf(" PAR CONNECTIVITY RESULTS: Time = %.5f s, Throughput = %.4f KElements/s, Size = %u elements\n\n",
parTimeConnectivityNoCopy[runTime], 1.0e-3f * (niftiVolume) / parTimeConnectivityNoCopy[runTime], (niftiVolume) );
//speedup
printf("\n **** SPEEDUP CONNECTIVITY compared to Sequential: %2f ****\n\n", seq_time_connectivity/parTimeConnectivityNoCopy[runTime]);
//free un-needed data structures
free(normalizedDataSeq);
free(connectivityDataSeq);
free(connectivityDataPar);
printf("\n#########################################################################################################\n");
printf("# STEP 8: PRINT FINAL RUNTIME STATISTICS #\n");
printf("#########################################################################################################\n");
float totalTimeSeq = seq_time_inverse + seq_time_pointData + seq_time_clean + seq_time_normalize + seq_time_connectivity;
float totalTimePar = parTimeInverseNoCopy[runTime] + parTimePointDataNoCopy[runTime] + parTimeCleanNoCopy[runTime] + parTimeNormalizeNoCopy[runTime] + parTimeConnectivityNoCopy[runTime];
seqTimeTotal[runTime] = totalTimeSeq;
parTimeTotalNoCopy[runTime] = totalTimePar;
printf("\n *** FINAL SPEEDUP FOR RUN %d out of %d (compared to Sequential): %4.4f ***\n\n\n\n", runTime, TIMESTORUN, totalTimeSeq/totalTimePar);
}//end times to run
//print out final stats
printf("\n\n\n\n");
printf("\n#########################################################################################################\n");
printf("# FINAL RUNTIME STATISTICS #\n");
printf("#########################################################################################################\n");
printf("INVERSE STATS\n");
seqTimeInverse[TIMESTORUN] = 0.0; //for averages
parTimeInverseNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeInverse[runTime];
float parTime = parTimeInverseNoCopy[runTime];
seqTimeInverse[TIMESTORUN] += seqTime;
parTimeInverseNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Inverse Seq: %.5f Inverse Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nPOINT DATA STATS\n");
seqTimePointData[TIMESTORUN] = 0.0; //for averages
parTimePointDataNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimePointData[runTime];
float parTime = parTimePointDataNoCopy[runTime];
seqTimePointData[TIMESTORUN] += seqTime;
parTimePointDataNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: PointData Seq: %.5f PointData Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nCLEAN STATS\n");
seqTimeClean[TIMESTORUN] = 0.0; //for averages
parTimeCleanNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeClean[runTime];
float parTime = parTimeCleanNoCopy[runTime];
seqTimeClean[TIMESTORUN] += seqTime;
parTimeCleanNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Clean Seq: %.5f Clean Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nNORMALIZE STATS\n");
seqTimeNormalize[TIMESTORUN] = 0.0; //for averages
parTimeNormalizeNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeNormalize[runTime];
float parTime = parTimeNormalizeNoCopy[runTime];
seqTimeNormalize[TIMESTORUN] += seqTime;
parTimeNormalizeNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Normalize Seq: %.5f Normalize Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nCONNECTIVITY STATS\n");
seqTimeConnectivity[TIMESTORUN] = 0.0; //for averages
parTimeConnectivityNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeConnectivity[runTime];
float parTime = parTimeConnectivityNoCopy[runTime];
seqTimeConnectivity[TIMESTORUN] += seqTime;
parTimeConnectivityNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Connectivity Seq: %.5f Connectivity Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("\n\nTOTAL TIME STATS\n");
seqTimeTotal[TIMESTORUN] = 0.0; //for averages
parTimeTotalNoCopy[TIMESTORUN] = 0.0;
for (int runTime = 0; runTime < TIMESTORUN; runTime++){
float seqTime = seqTimeTotal[runTime];
float parTime = parTimeTotalNoCopy[runTime];
seqTimeTotal[TIMESTORUN] += seqTime;
parTimeTotalNoCopy[TIMESTORUN] += parTime;
printf(" Run: %d: Total Time Seq: %.5f Total Time Par: %.5f Speedup: %.5f\n", runTime, seqTime, parTime, seqTime/parTime);
}//end runTime
printf("***** AVERAGES *****\n\n");
float aveSeqInverse = seqTimeInverse[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParInverse = parTimeInverseNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveInvSpeedup = aveSeqInverse/aveParInverse;
printf("INVERSE AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqInverse, aveParInverse, aveInvSpeedup);
float aveSeqPointData = seqTimePointData[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParPointData = parTimePointDataNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float avePDSpeedup = aveSeqPointData/aveParPointData;
printf("POINT DATA AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqPointData, aveParPointData, avePDSpeedup);
float aveSeqClean = seqTimeClean[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParClean = parTimeCleanNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveCleanSpeedup = aveSeqClean/aveParClean;
printf("CLEAN AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqClean, aveParClean, aveCleanSpeedup);
float aveSeqNorm = seqTimeNormalize[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParNorm = parTimeNormalizeNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveNormSpeedup = aveSeqNorm/aveParNorm;
printf("NORMALIZE AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqNorm, aveParNorm, aveNormSpeedup);
float aveSeqConn = seqTimeConnectivity[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParConn = parTimeConnectivityNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
  float aveConnSpeedup = aveSeqConn/aveParConn;
printf("CONNECTIVITY AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqConn, aveParConn, aveConnSpeedup);
float aveSeqTotal = seqTimeTotal[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveParTotal = parTimeTotalNoCopy[TIMESTORUN]/ (TIMESTORUN*1.0);
float aveTotalSpeedup = aveSeqTotal/aveParTotal;
printf("TOTAL AVERAGES: Ave Seq Time: %.5f, Ave Par Time: %.5f, Ave Speedup: %.5f\n", aveSeqTotal, aveParTotal, aveTotalSpeedup);
}// end main
/************************************************************************
* *
* READ / WRITE NIFTI FILES *
* *
************************************************************************/
/***
* Converts an integer to a string
*
* Specifically, we need the string in the form
* of a 5 character long number: such as "00011"
* to create a final string of
*
* "nifti1Data/rfMRI_REST1_LR_00011.nii"
*
*
*/
void getNiftiFileName(int i, char* nameString)
{
char const digit[] = "0123456789";
char temp[6];
char* tempString;
temp[5] = '\0'; //null terminate
//walk backwards through the integer loading the string
for (int j = 4; j >=0; j--){
temp[j] = digit[i%10];
i = i /10;
}
tempString = temp;
strcpy(nameString, NIFTIBASESTRING);
strcat(nameString, tempString);
strcat(nameString, NIFTIEXTENSION);
}//end getNiftiFileName
/***
* Reads in the file of covariates.
*/
int readCovariateFile(float* covariates, int numFiles)
{
printf(" ...READING COVARIATES FILE....\n\n");
FILE* fp;
char oneWord[50];
  int c; //fscanf returns an int, so an int is needed for a reliable EOF comparison
int iCov = 0;
//temporary data structure to hold 27 covariates
int numCovariateElements = 27 * numFiles;
  float* temp = (float*) calloc ( numCovariateElements, sizeof (float) ); //calloc initializes bits to zero
fp = fopen(COVARIATESFILE, "r"); //open in "read" mode
if (fp == NULL){
printf("Covariates File Read Error: %s Program Abort.\n", COVARIATESFILE);
return(1); //error
}//end if
c = fscanf(fp, "%s", oneWord);
while(c!= EOF) /* repeat until EOF */
{
if (iCov >= numCovariateElements){
printf("Error Reading Covariates File: number of elements: %d, number expected: %d. Program Abort.\n", iCov, numCovariateElements);
return(1);//error
}//end if
temp[iCov] = atof(oneWord); /* convert to float and store */
iCov++;
c = fscanf(fp,"%s",oneWord); /* get next word from the file */
}//end while
fclose(fp);
if (iCov != numCovariateElements){
printf("Error Reading Covariates File: Expected %d elements, but read %d elements. Program Abort.\n", numCovariateElements, iCov);
return(1);
}
  // at this point, we really only want to keep covariates 0..11, 24, 25, 26 out of 0 .. 26
for (int i = 0; i < 12*numFiles; i++){
covariates[i] = temp[i];
}// end for i
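  //covariates 24..26 are stored immediately after the first 12, as rows 12..14 of the reduced covariate matrix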
for (int i = 24; i < 27; i++){
for (int j = 0; j < numFiles; j++){
covariates[(i-12)*numFiles + j] = temp[i*numFiles + j];
}
}
free(temp);
return 0; //success
}//end readCovariateFile
/***
 * Reads in the data from the 1190 NIFTI files
*/
int readNiftiFiles(float* niftiFiles, int numFiles, int fileVolume, int begFileNumber)
{
printf(" ...READING NIFTI FILES....\n");
char* niftiFileName = (char *) malloc ( 80 * sizeof(char));
int errorCount = 0;
// read in one file at a time
for (int i = 0; i < numFiles; i++){
//get the file name associated with this number
int fileNumber = i + begFileNumber;
getNiftiFileName(fileNumber, niftiFileName);
//read in the file to the appropriate place in the data structure
    int error = read_nifti_file(niftiFileName, niftiFiles + (i*fileVolume), 0); //last argument is verbosity: 0 = quiet, 1 = verbose
if (error){
errorCount++;
printf("File Error: %s\n", niftiFileName);
}
}//end for i
free(niftiFileName);
if (errorCount == 0){
//printf(" Finished READING NIFTI files.\n\n");
return 0; //success
}else{
printf(" Finished reading NIFTI files. %d files had read errors. Program Abort.\n\n", errorCount);
return(1); //error
}
}//end readNiftiFiles
/***
 * Writes cleaned data to new 1190 NIFTI files
*/
int writeNiftiFiles()
{
printf(" ...WRITING NIFTI FILES....\n\n");
return 0; //success
}//end writeNiftiFiles
/************************************************************************
* *
* HELPER FUNCTIONS FOR TESTING *
* *
************************************************************************/
void checkInverse(float* covTransXCov, float* matrixInverse, int numCovariates)
{
int i, j, k;
for (i = 0; i < numCovariates; i++){
for (j = 0; j < numCovariates; j++){
float temp = 0.0;
for (k = 0; k < numCovariates; k++)
temp += covTransXCov[i*numCovariates+k] * matrixInverse[k*numCovariates+j];
//test the result
temp = abs(temp);
float desiredValue = 0.0;
if (i == j)
desiredValue = 1.0;
if ( abs(desiredValue - temp) > ERRORMARGIN ){
printf(" ERROR: matrix inverse test (identity matrix) not valid at [%d][%d]. Value: %5.7f, should be: %5.7f\n", i, j, temp, desiredValue);
return;
}//end if
}//end for j
}//end for i
//if we get here it's all good
printf(" Matrix inverse valid for Sequential Version\n");
}//end checkInverse
void compareTransXCovariates(float* covTransXCovSeq, float* covTransXCovPar, int numCovariates)
{
for (int i = 0; i < numCovariates; i++){
for (int j = 0; j < numCovariates; j++){
int currentElement = i*numCovariates + j;
float seqElement = covTransXCovSeq[currentElement];
float parElement = covTransXCovPar[currentElement];
if ( abs(seqElement - parElement) > ERRORMARGIN){
printf(" INVALID!!!!! Transpose X Covariates not equal at [%d][%d]: Should be: %.5f, Actual: %.5f\n", i, j, seqElement, parElement);
return;
}//end if
}//end for j
}//end for i
printf(" TRANSPOSE X COVARIATES VALID!\n");
}//end compareTransXCovariates
void comparePointData(float* pointDataSeq, float* pointDataPar, int numFiles, int niftiVolume)
{
for (int i = 0; i < niftiVolume; i++){
for (int j = 0; j < numFiles; j++){
int currentElement = i*numFiles + j;
if ( abs(pointDataSeq[currentElement] - pointDataPar[currentElement]) > ERRORMARGIN){
printf(" INVALID!!!!! Point Data Matrix not equal at [%d][%d]\n", i, j);
return;
}//end if !=
}//end for j
}//end for i
// if we get here, data is correct
printf(" POINT DATA MATRIX VALID!\n");
}//end comparePointData
void compareCleanData(float* cleanedDataSeq, float* cleanedDataPar, int numFiles, int niftiVolume)
{
int i, j;
for (i = 0; i < niftiVolume; i++){
for (j = 0; j < numFiles; j++){
int currentElement = i*numFiles + j;
float seqVal = cleanedDataSeq[currentElement];
float parVal = cleanedDataPar[currentElement];
if ( abs(seqVal - parVal) > ERRORMARGIN){
printf(" INVALID!!!!! Clean Data not equal at [%d][%d]: Should be: %.5f, actual: %.5f\n", i, j, seqVal, parVal );
return;
}//end if
}//end for j
}//end for i
printf(" CLEAN DATA VALID!\n");
}//end compareCleanData
/***
* Incoming data will be in point-vector form:
*
* rows = niftiVolume;
* cols = numFiles;
*
*/
void compareNormalizedData(float* normalizedDataSeq, float* normalizedDataPar, int numFiles, int niftiVolume)
{
int i, j;
for (i = 0; i < niftiVolume; i++){
int currentRowStartElement = i * numFiles;
for (j = 0; j < numFiles; j++){
int currentElement = currentRowStartElement + j;
float normSeq = normalizedDataSeq[currentElement];
float normPar = normalizedDataPar[currentElement];
if (abs(normSeq - normPar) > ERRORMARGIN){
printf(" INVALID!!!!! Normalized Matrix not equal at [%d][%d]: Should be: %.5f, actual: %.5f\n", i, j, normSeq, normPar);
return;
}//end if
}//end for j
}//end for i
printf(" NORMALIZED MATRIX VALID!\n");
}//end compareNormalizedData
void compareConnectivityData(float* connectivityDataSeq, float* connectivityDataPar, int niftiVolume, int seed)
{
int i;
  for (i = 0; i < niftiVolume; i++){
float conSeq = connectivityDataSeq[i];
float conPar = connectivityDataPar[i];
if ( abs(conSeq - conPar) > ERRORMARGIN){
printf(" INVALID!!!!! Connectivity Vector not equal at [%d] for seed %d: Should be: %.5f, actual: %.5f\n", i, seed, conSeq, conPar);
return;
}//end if
}//end for i
printf(" CONNECTIVITY VECTOR VALID FOR SEED %d!\n", seed);
}//end compareConnectivitydata
|
42aa168422f2c989c2683c4971c9a221521f4071.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ZWEngine.h"
#include <iostream>
// settings
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
const GLchar *vs_shader_path = "../glsl/vertex_shader.glsl";
const GLchar *fs_shader_path = "../glsl/fragment_shader.glsl";
__global__ void add(int n, float *x, float *y) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main() {
auto *tmp_app = new ZWEngine();
if (!tmp_app->init_engine(SCR_WIDTH, SCR_HEIGHT)) {
std::cout << "engine failed to initialize" << std::endl;
} else {
std::cout << "engine initialized successfully" << std::endl;
}
tmp_app->init_shader_program(vs_shader_path, fs_shader_path);
std::cout << "engine start running" << std::endl;
tmp_app->run();
} | 42aa168422f2c989c2683c4971c9a221521f4071.cu | #include "ZWEngine.h"
#include <iostream>
// settings
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
const GLchar *vs_shader_path = "../glsl/vertex_shader.glsl";
const GLchar *fs_shader_path = "../glsl/fragment_shader.glsl";
__global__ void add(int n, float *x, float *y) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main() {
auto *tmp_app = new ZWEngine();
if (!tmp_app->init_engine(SCR_WIDTH, SCR_HEIGHT)) {
std::cout << "engine failed to initialize" << std::endl;
} else {
std::cout << "engine initialized successfully" << std::endl;
}
tmp_app->init_shader_program(vs_shader_path, fs_shader_path);
std::cout << "engine start running" << std::endl;
tmp_app->run();
} |
83dc34cfd176191fa91f4025aaabddfdf790747b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdlib.h"
// for cuda profiler
#include "hip/hip_runtime_api.h"
#define M_s 1.f // Solar mass
#define G 39.5f// Gravitational constant Solar mass, AU
// single precision CUDA function to be called on GPU
__device__ float potential_thingy(float x, float y) {
return G * M_s * x / powf((powf(x, 2) + powf(y, 2)), 1.5f);
}
// euler method for velocity component
__global__ void euler_integration_vx(float *x_out, float *y_out, float *vx_out, int n, int steps, int current_step, float dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n){
vx_out[n*current_step+tid] = vx_out[(n*current_step-n)+tid] - potential_thingy(x_out[(n*current_step-n)+tid], y_out[(n*current_step-n)+tid]) * dt;
tid += gridDim.x * blockDim.x;
}
}
// euler method for position component
__global__ void euler_integration_x(float *x_out, float *vx_out, int n, int steps, int current_step, float dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n){
x_out[n*current_step+tid] = x_out[(n*current_step-n)+tid] + vx_out[n*current_step+tid] * dt;
tid += gridDim.x * blockDim.x;
}
}
extern "C" int integrate_euler_cuda(float *x, float *y, float *vx, float *vy, float *x_out, float *y_out, float *vx_out,
float *vy_out, int n, int steps, float dt) {
// dev_** variables for variables on CUDA device
float *dev_x_out, *dev_y_out, *dev_vx_out, *dev_vy_out;
// streams related constants and things
const int nStreams = 4;
// stream for kernel
hipStream_t stream[nStreams];
for (int i = 0; i < nStreams; ++i)
hipStreamCreate(&stream[i]);
// allocate the memory on the GPU (VRAM)
// hipMalloc docs: http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_gc63ffd93e344b939d6399199d8b12fef.html
hipMalloc((void**)&dev_x_out, steps * n * sizeof(float));
hipMalloc((void**)&dev_y_out, steps * n * sizeof(float));
hipMalloc((void**)&dev_vx_out, steps * n * sizeof(float));
hipMalloc((void**)&dev_vy_out, steps * n * sizeof(float));
// map the arrays x, y, vx, vy to the corresponding GPU array
// hipMemcpy docs: http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_g48efa06b81cc031b2aa6fdc2e9930741.html
hipMemcpy(&dev_x_out[0], &x[0], n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&dev_y_out[0], &y[0], n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&dev_vx_out[0], &vx[0], n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(&dev_vy_out[0], &vy[0], n * sizeof(float), hipMemcpyHostToDevice);
// loop time, because time steps cannot be paralleled
int cstep = 1; // keep track of the time in integration
while (cstep < steps){
// integrate velocity first in 2 concurrent kernel
hipLaunchKernelGGL(( euler_integration_vx), dim3(128), dim3(128), 0, stream[0], dev_x_out, dev_y_out, dev_vx_out, n, steps, cstep, dt);
hipLaunchKernelGGL(( euler_integration_vx), dim3(128), dim3(128), 0, stream[1], dev_y_out, dev_x_out, dev_vy_out, n, steps, cstep, dt);
// as soon as any kernel finished computation, send the data back to CPU host
hipMemcpyAsync(&vx_out[cstep*n], &dev_vx_out[cstep*n], n * sizeof(float), hipMemcpyDeviceToHost, stream[0]);
hipMemcpyAsync(&vy_out[cstep*n], &dev_vy_out[cstep*n], n * sizeof(float), hipMemcpyDeviceToHost, stream[1]);
// as soon as above finished, start corresponding position computation
hipLaunchKernelGGL(( euler_integration_x), dim3(128), dim3(128), 0, stream[2], dev_x_out, dev_vx_out, n, steps, cstep, dt);
hipLaunchKernelGGL(( euler_integration_x), dim3(128), dim3(128), 0, stream[3], dev_y_out, dev_vy_out, n, steps, cstep, dt);
// as soon as any kernel finished computation, send the data back to CPU host
hipMemcpyAsync(&x_out[cstep*n], &dev_x_out[cstep*n], n * sizeof(float), hipMemcpyDeviceToHost, stream[2]);
hipMemcpyAsync(&y_out[cstep*n], &dev_y_out[cstep*n], n * sizeof(float), hipMemcpyDeviceToHost, stream[3]);
// make sure above all finished to start next time step because next time step depends on this step
hipDeviceSynchronize();
cstep += 1;
}
// free the memory allocated on the GPU after integration, if really galpy, need to take care memory for real
hipFree(dev_x_out);
hipFree(dev_y_out);
hipFree(dev_vx_out);
hipFree(dev_vy_out);
return 0;
} | 83dc34cfd176191fa91f4025aaabddfdf790747b.cu | #include "cuda.h"
#include "stdio.h"
#include "stdlib.h"
// for cuda profiler
#include "cuda_profiler_api.h"
#define M_s 1.f // Solar mass
#define G 39.5f// Gravitational constant Solar mass, AU
// single precision CUDA function to be called on GPU
__device__ float potential_thingy(float x, float y) {
return G * M_s * x / powf((powf(x, 2) + powf(y, 2)), 1.5f);
}
// euler method for velocity component
__global__ void euler_integration_vx(float *x_out, float *y_out, float *vx_out, int n, int steps, int current_step, float dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n){
vx_out[n*current_step+tid] = vx_out[(n*current_step-n)+tid] - potential_thingy(x_out[(n*current_step-n)+tid], y_out[(n*current_step-n)+tid]) * dt;
tid += gridDim.x * blockDim.x;
}
}
// euler method for position component
__global__ void euler_integration_x(float *x_out, float *vx_out, int n, int steps, int current_step, float dt) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < n){
x_out[n*current_step+tid] = x_out[(n*current_step-n)+tid] + vx_out[n*current_step+tid] * dt;
tid += gridDim.x * blockDim.x;
}
}
extern "C" int integrate_euler_cuda(float *x, float *y, float *vx, float *vy, float *x_out, float *y_out, float *vx_out,
float *vy_out, int n, int steps, float dt) {
// dev_** variables for variables on CUDA device
float *dev_x_out, *dev_y_out, *dev_vx_out, *dev_vy_out;
// streams related constants and things
const int nStreams = 4;
// stream for kernel
cudaStream_t stream[nStreams];
for (int i = 0; i < nStreams; ++i)
cudaStreamCreate(&stream[i]);
// allocate the memory on the GPU (VRAM)
// cudaMalloc docs: http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_gc63ffd93e344b939d6399199d8b12fef.html
cudaMalloc((void**)&dev_x_out, steps * n * sizeof(float));
cudaMalloc((void**)&dev_y_out, steps * n * sizeof(float));
cudaMalloc((void**)&dev_vx_out, steps * n * sizeof(float));
cudaMalloc((void**)&dev_vy_out, steps * n * sizeof(float));
// map the arrays x, y, vx, vy to the corresponding GPU array
// cudaMemcpy docs: http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_g48efa06b81cc031b2aa6fdc2e9930741.html
cudaMemcpy(&dev_x_out[0], &x[0], n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&dev_y_out[0], &y[0], n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&dev_vx_out[0], &vx[0], n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&dev_vy_out[0], &vy[0], n * sizeof(float), cudaMemcpyHostToDevice);
// loop time, because time steps cannot be paralleled
int cstep = 1; // keep track of the time in integration
while (cstep < steps){
// integrate velocity first in 2 concurrent kernel
euler_integration_vx<<<128, 128, 0, stream[0]>>>(dev_x_out, dev_y_out, dev_vx_out, n, steps, cstep, dt);
euler_integration_vx<<<128, 128, 0, stream[1]>>>(dev_y_out, dev_x_out, dev_vy_out, n, steps, cstep, dt);
// as soon as any kernel finished computation, send the data back to CPU host
cudaMemcpyAsync(&vx_out[cstep*n], &dev_vx_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(&vy_out[cstep*n], &dev_vy_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[1]);
// as soon as above finished, start corresponding position computation
euler_integration_x<<<128, 128, 0, stream[2]>>>(dev_x_out, dev_vx_out, n, steps, cstep, dt);
euler_integration_x<<<128, 128, 0, stream[3]>>>(dev_y_out, dev_vy_out, n, steps, cstep, dt);
// as soon as any kernel finished computation, send the data back to CPU host
cudaMemcpyAsync(&x_out[cstep*n], &dev_x_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[2]);
cudaMemcpyAsync(&y_out[cstep*n], &dev_y_out[cstep*n], n * sizeof(float), cudaMemcpyDeviceToHost, stream[3]);
// make sure above all finished to start next time step because next time step depends on this step
cudaDeviceSynchronize();
cstep += 1;
}
// free the memory allocated on the GPU after integration, if really galpy, need to take care memory for real
cudaFree(dev_x_out);
cudaFree(dev_y_out);
cudaFree(dev_vx_out);
cudaFree(dev_vy_out);
return 0;
} |
555e94ecf284a8255c4effea2b79b6ad56d3f02b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathReduce.hip"
#else
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<scalar_t>(val);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<scalar_t, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(hipGetLastError());
return val;
}
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
#endif
#endif
| 555e94ecf284a8255c4effea2b79b6ad56d3f02b.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
accreal THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
void THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::lower_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<scalar_t, int64_t>());
}
void THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<scalar_t, int64_t>
init =
thrust::make_pair<scalar_t, int64_t>(
THCNumerics<scalar_t>::upper_bound(), 0);
return THC_reduceDimIndex<scalar_t, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<scalar_t, int64_t>());
}
scalar_t THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::lower_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
scalar_t THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::upper_bound(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<scalar_t>(val);
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
int64_t numel = THCTensor_(nElement)(state, data);
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimensionLegacyNoScalars)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<scalar_t>::gt(value, scalar_cast<scalar_t>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) > 1, 1, "need at least 2 dimensions");
if (numel > 0) {
ptrdiff_t size = numel / THTensor_sizeLegacyNoScalars(data, 0);
dim3 grid( THTensor_sizeLegacyNoScalars(data, 0));
// NOTE: only with this specific number of threads can this work on GPUs with a warp size != 32 (such as AMD). Do not alter w/o changing buffer size in kernel.
dim3 threads(32);
THCTensor_kernel_renorm<scalar_t, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError_t errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
}
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
void THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, true},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
WelfordData<accreal, scalar_t> init;
init.reset();
if (!THC_reduceDim<scalar_t>(state, self_, src,
ModifyWelford<WelfordData<accreal, scalar_t>>{},
ReduceWelford<accreal, scalar_t>{},
VarianceWelford<accreal, scalar_t>{biased, false},
init,
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
accreal THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<scalar_t>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(std::max<int64_t>(0, THCTensor_(nElement)(state, self) - (biased ? 0 : 1)))
);
THCudaCheck(cudaGetLastError());
return val;
}
void THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(INFINITY),
dimension, keepdim);
} else {
THC_reduceDim<scalar_t>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(normall)(THCState *state, THCTensor *self, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<scalar_t>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMin<accreal>{},
scalar_cast<accreal>(INFINITY),
&result, 0);
} else {
THC_reduceAll<scalar_t>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, scalar_t _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<scalar_t> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
ReduceMax<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(-INFINITY))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(INFINITY),
ReduceMin<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(1)));
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(scalar_cast<scalar_t>(0)));
} else {
result = thrust::inner_product(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<scalar_t, accreal>(value));
result = THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return result;
}
#endif
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
#endif
#endif
|
f80c0e99a704556d0b66c9e7d807925719f4c67e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <linalg/batched/make_symm.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchMakeSymmInputs {
T tolerance;
int n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream& operator<<(::std::ostream& os, const BatchMakeSymmInputs<T>& dims)
{
return os;
}
template <typename Type>
__global__ void naiveBatchMakeSymmKernel(Type* y, const Type* x, int n)
{
int batch = blockIdx.z;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (row < n && col < n) {
int idx = batch * n * n + row * n + col;
int other = batch * n * n + col * n + row;
y[idx] = (x[idx] + x[other]) * Type(0.5);
}
}
template <typename Type>
void naiveBatchMakeSymm(Type* y, const Type* x, int batchSize, int n, hipStream_t stream)
{
dim3 blk(16, 16);
int nblks = raft::ceildiv<int>(n, blk.x);
dim3 grid(nblks, nblks, batchSize);
hipLaunchKernelGGL(( naiveBatchMakeSymmKernel<Type>), dim3(grid), dim3(blk), 0, stream, y, x, n);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
class BatchMakeSymmTest : public ::testing::TestWithParam<BatchMakeSymmInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<BatchMakeSymmInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.n * params.n;
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(x, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(x, len, T(-1.0), T(1.0), stream);
naiveBatchMakeSymm(out_ref, x, params.batchSize, params.n, stream);
make_symm<T, int>(out, x, params.batchSize, params.n, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(hipFree(x));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipStreamDestroy(stream));
}
protected:
hipStream_t stream;
BatchMakeSymmInputs<T> params;
T *x, *out_ref, *out;
};
const std::vector<BatchMakeSymmInputs<float>> inputsf = {
{0.000001f, 128, 32, 1234ULL},
{0.000001f, 126, 32, 1234ULL},
{0.000001f, 125, 32, 1234ULL},
};
typedef BatchMakeSymmTest<float> BatchMakeSymmTestF;
TEST_P(BatchMakeSymmTestF, Result)
{
int len = params.batchSize * params.n * params.n;
ASSERT_TRUE(devArrMatch(out_ref, out, len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchMakeSymmTests, BatchMakeSymmTestF, ::testing::ValuesIn(inputsf));
typedef BatchMakeSymmTest<double> BatchMakeSymmTestD;
const std::vector<BatchMakeSymmInputs<double>> inputsd = {
{0.0000001, 128, 32, 1234ULL},
{0.0000001, 126, 32, 1234ULL},
{0.0000001, 125, 32, 1234ULL},
};
TEST_P(BatchMakeSymmTestD, Result)
{
int len = params.batchSize * params.n * params.n;
ASSERT_TRUE(devArrMatch(out_ref, out, len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchMakeSymmTests, BatchMakeSymmTestD, ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
| f80c0e99a704556d0b66c9e7d807925719f4c67e.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <test_utils.h>
#include <linalg/batched/make_symm.cuh>
#include <raft/random/rng.cuh>
#include "../test_utils.h"
namespace MLCommon {
namespace LinAlg {
namespace Batched {
template <typename T>
struct BatchMakeSymmInputs {
T tolerance;
int n, batchSize;
unsigned long long int seed;
};
template <typename T, typename IdxType = int>
::std::ostream& operator<<(::std::ostream& os, const BatchMakeSymmInputs<T>& dims)
{
return os;
}
template <typename Type>
__global__ void naiveBatchMakeSymmKernel(Type* y, const Type* x, int n)
{
int batch = blockIdx.z;
int row = threadIdx.y + blockDim.y * blockIdx.y;
int col = threadIdx.x + blockDim.x * blockIdx.x;
if (row < n && col < n) {
int idx = batch * n * n + row * n + col;
int other = batch * n * n + col * n + row;
y[idx] = (x[idx] + x[other]) * Type(0.5);
}
}
template <typename Type>
void naiveBatchMakeSymm(Type* y, const Type* x, int batchSize, int n, cudaStream_t stream)
{
dim3 blk(16, 16);
int nblks = raft::ceildiv<int>(n, blk.x);
dim3 grid(nblks, nblks, batchSize);
naiveBatchMakeSymmKernel<Type><<<grid, blk, 0, stream>>>(y, x, n);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
class BatchMakeSymmTest : public ::testing::TestWithParam<BatchMakeSymmInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<BatchMakeSymmInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.batchSize * params.n * params.n;
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(x, len);
raft::allocate(out_ref, len);
raft::allocate(out, len);
r.uniform(x, len, T(-1.0), T(1.0), stream);
naiveBatchMakeSymm(out_ref, x, params.batchSize, params.n, stream);
make_symm<T, int>(out, x, params.batchSize, params.n, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaStreamDestroy(stream));
}
protected:
cudaStream_t stream;
BatchMakeSymmInputs<T> params;
T *x, *out_ref, *out;
};
const std::vector<BatchMakeSymmInputs<float>> inputsf = {
{0.000001f, 128, 32, 1234ULL},
{0.000001f, 126, 32, 1234ULL},
{0.000001f, 125, 32, 1234ULL},
};
typedef BatchMakeSymmTest<float> BatchMakeSymmTestF;
TEST_P(BatchMakeSymmTestF, Result)
{
int len = params.batchSize * params.n * params.n;
ASSERT_TRUE(devArrMatch(out_ref, out, len, raft::CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchMakeSymmTests, BatchMakeSymmTestF, ::testing::ValuesIn(inputsf));
typedef BatchMakeSymmTest<double> BatchMakeSymmTestD;
const std::vector<BatchMakeSymmInputs<double>> inputsd = {
{0.0000001, 128, 32, 1234ULL},
{0.0000001, 126, 32, 1234ULL},
{0.0000001, 125, 32, 1234ULL},
};
TEST_P(BatchMakeSymmTestD, Result)
{
int len = params.batchSize * params.n * params.n;
ASSERT_TRUE(devArrMatch(out_ref, out, len, raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(BatchMakeSymmTests, BatchMakeSymmTestD, ::testing::ValuesIn(inputsd));
} // end namespace Batched
} // end namespace LinAlg
} // end namespace MLCommon
|
12257f973b00d449be509b7e39d6b8c6489db07e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <memory>
#include <cmath>
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "render/utils/color/color_gradient.h"
#include "render/2d/heatmap/set_color.h"
namespace zilliz {
namespace render {
unsigned int
iDivUp(const unsigned int &a, const unsigned int &b) { return (a + b - 1) / b; }
template<typename T>
__global__ void SetCountValue_gpu(float *out,
uint32_t *in_x,
uint32_t *in_y,
T *in_c,
int64_t num,
int64_t width,
int64_t height) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
for (; i < num; i += blockDim.x * gridDim.x) {
uint32_t vertice_x = in_x[i];
uint32_t vertice_y = height - in_y[i] - 1;
if (vertice_y > height || vertice_x > width)
continue;
int64_t index = vertice_y * width + vertice_x;
if (index >= width * height)
continue;
out[index] += in_c[i];
}
}
__global__ void
HeatMapArray_gpu(float *in_count, float *out_count, float *kernel, int64_t kernel_size, int64_t width, int64_t height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int count_index = y * width + x;
if (in_count[count_index] > 1e-8) {
int r = kernel_size / 2;
for (int m = -r; m <= r; m++) {
if (x + m < 0 || x + m >= width)
continue;
for (int n = -r; n <= r; n++) {
if (y + n < 0 || y + n >= height)
continue;
int kernel_index = (r + n) * (2 * r + 1) + (m + r);
int dev_index = (y + n) * width + (x + m);
out_count[dev_index] += in_count[count_index] * kernel[kernel_index];
}
}
}
}
__global__ void
MeanKernel_gpu(float *img_in, float *img_out, int64_t r, int64_t img_w, int64_t img_h) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
double gradient = 0.0;
if (r > 10) r = 10;
int count = 0;
if ((row >= 0) && (row < img_h) && (col >= 0) && (col < img_w)) {
for (int m = -r; m <= r; m++) {
if (row + m < 0 || row + m >= img_h)
continue;
for (int n = -r; n <= r; n++) {
if (col + n < 0 || col + n >= img_w)
continue;
int y = row + m;
int x = col + n;
gradient += img_in[y * img_w + x];
count++;
}
}
img_out[row * img_w + col] = gradient / count;
}
}
template<typename T>
void set_colors_gpu(float *colors,
uint32_t* input_x,
uint32_t* input_y,
T* input_c,
int64_t num,
VegaHeatMap &vega_heat_map) {
WindowParams window_params = vega_heat_map.window_params();
int64_t width = window_params.width();
int64_t height = window_params.height();
int64_t window_size = width * height;
float *pix_count;
uint32_t *in_x, *in_y;
T *in_c;
hipMalloc((void **) &pix_count, window_size * sizeof(float));
hipMalloc((void **) &in_x, num * sizeof(uint32_t));
hipMalloc((void **) &in_y, num * sizeof(uint32_t));
hipMalloc((void **) &in_c, num * sizeof(T));
hipMemset(pix_count, 0, window_size * sizeof(float));
hipMemcpy(in_x, input_x, num * sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(in_y, input_y, num * sizeof(uint32_t), hipMemcpyHostToDevice);
hipMemcpy(in_c, input_c, num * sizeof(T), hipMemcpyHostToDevice);
    SetCountValue_gpu<T><<<256, 1024>>>(pix_count, in_x, in_y, in_c, num, width, height);
double scale = vega_heat_map.map_scale() * 0.4;
int d = pow(2, scale);
int64_t kernel_size = d * 2 + 3;
float *kernel = (float *) malloc(kernel_size * kernel_size * sizeof(float));
guassiankernel2d(kernel, kernel_size, kernel_size, kernel_size, kernel_size);
float *dev_kernel;
hipMalloc((void **) &dev_kernel, kernel_size * kernel_size * sizeof(float));
hipMemcpy(dev_kernel, kernel, kernel_size * kernel_size * sizeof(float), hipMemcpyHostToDevice);
float *dev_count;
hipMalloc((void **) &dev_count, window_size * sizeof(float));
hipMemset(dev_count, 0, window_size * sizeof(float));
const unsigned int blockW = 32;
const unsigned int blockH = 32;
const dim3 threadBlock(blockW, blockH);
const dim3 grid(iDivUp(width, blockW), iDivUp(height, blockH));
    HeatMapArray_gpu<<<grid, threadBlock>>>(pix_count, dev_count, dev_kernel, kernel_size, width, height);
float *color_count;
hipMalloc((void **) &color_count, window_size * sizeof(float));
hipMemset(color_count, 0, window_size * sizeof(float));
int64_t mean_radius = (int) (log((kernel_size - 3) / 2) / 0.4);
    MeanKernel_gpu<<<grid, threadBlock>>>(dev_count, color_count, mean_radius + 1, width, height);
    MeanKernel_gpu<<<grid, threadBlock>>>(color_count, dev_count, mean_radius / 2 + 1, width, height);
auto host_count = (float *) malloc(window_size * sizeof(float));
hipMemcpy(host_count, dev_count, window_size * sizeof(float), hipMemcpyDeviceToHost);
float max_pix = 0;
for (auto k = 0; k < window_size; k++) {
if (max_pix < host_count[k])
max_pix = host_count[k];
}
ColorGradient color_gradient;
color_gradient.createDefaultHeatMapGradient();
int64_t c_offset = 0;
for (auto j = 0; j < window_size; j++) {
float value = host_count[j] / max_pix;
float color_r, color_g, color_b;
color_gradient.getColorAtValue(value, color_r, color_g, color_b);
colors[c_offset++] = color_r;
colors[c_offset++] = color_g;
colors[c_offset++] = color_b;
colors[c_offset++] = value;
}
free(kernel);
free(host_count);
hipFree(pix_count);
hipFree(dev_kernel);
hipFree(dev_count);
hipFree(color_count);
hipFree(in_x);
hipFree(in_y);
hipFree(in_c);
}
} //namespace render
} //namespace
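// Explicit instantiations of set_colors_gpu<T> for each supported element type;
// render/2d/heatmap/set_color.inl is expected to consume (and #undef) the
// TEMPLATE_GEN_PREFIX and T macros defined before each include.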
#define TEMPLATE_GEN_PREFIX
#define T int8_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int16_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int32_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int64_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint8_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint16_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint32_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint64_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T float
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T double
#include "render/2d/heatmap/set_color.inl"
| 12257f973b00d449be509b7e39d6b8c6489db07e.cu | #include <iostream>
#include <memory>
#include <cmath>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "render/utils/color/color_gradient.h"
#include "render/2d/heatmap/set_color.h"
namespace zilliz {
namespace render {
unsigned int
iDivUp(const unsigned int &a, const unsigned int &b) { return (a + b - 1) / b; }
template<typename T>
__global__ void SetCountValue_gpu(float *out,
uint32_t *in_x,
uint32_t *in_y,
T *in_c,
int64_t num,
int64_t width,
int64_t height) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
for (; i < num; i += blockDim.x * gridDim.x) {
uint32_t vertice_x = in_x[i];
uint32_t vertice_y = height - in_y[i] - 1;
if (vertice_y > height || vertice_x > width)
continue;
int64_t index = vertice_y * width + vertice_x;
if (index >= width * height)
continue;
out[index] += in_c[i];
}
}
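// HeatMapArray_gpu: for each pixel whose accumulated count is non-zero, scatter
// the count into its (kernel_size x kernel_size) neighborhood, weighted by the
// supplied Gaussian kernel.  Neighboring pixels update overlapping out_count
// entries without atomics.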
__global__ void
HeatMapArray_gpu(float *in_count, float *out_count, float *kernel, int64_t kernel_size, int64_t width, int64_t height) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int count_index = y * width + x;
if (in_count[count_index] > 1e-8) {
int r = kernel_size / 2;
for (int m = -r; m <= r; m++) {
if (x + m < 0 || x + m >= width)
continue;
for (int n = -r; n <= r; n++) {
if (y + n < 0 || y + n >= height)
continue;
int kernel_index = (r + n) * (2 * r + 1) + (m + r);
int dev_index = (y + n) * width + (x + m);
out_count[dev_index] += in_count[count_index] * kernel[kernel_index];
}
}
}
}
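// MeanKernel_gpu: box-mean smoothing.  Each in-bounds output pixel is the
// average of the input pixels within radius r (clamped to at most 10),
// skipping neighbors that fall outside the image.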
__global__ void
MeanKernel_gpu(float *img_in, float *img_out, int64_t r, int64_t img_w, int64_t img_h) {
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
double gradient = 0.0;
if (r > 10) r = 10;
int count = 0;
if ((row >= 0) && (row < img_h) && (col >= 0) && (col < img_w)) {
for (int m = -r; m <= r; m++) {
if (row + m < 0 || row + m >= img_h)
continue;
for (int n = -r; n <= r; n++) {
if (col + n < 0 || col + n >= img_w)
continue;
int y = row + m;
int x = col + n;
gradient += img_in[y * img_w + x];
count++;
}
}
img_out[row * img_w + col] = gradient / count;
}
}
template<typename T>
void set_colors_gpu(float *colors,
uint32_t* input_x,
uint32_t* input_y,
T* input_c,
int64_t num,
VegaHeatMap &vega_heat_map) {
WindowParams window_params = vega_heat_map.window_params();
int64_t width = window_params.width();
int64_t height = window_params.height();
int64_t window_size = width * height;
float *pix_count;
uint32_t *in_x, *in_y;
T *in_c;
cudaMalloc((void **) &pix_count, window_size * sizeof(float));
cudaMalloc((void **) &in_x, num * sizeof(uint32_t));
cudaMalloc((void **) &in_y, num * sizeof(uint32_t));
cudaMalloc((void **) &in_c, num * sizeof(T));
cudaMemset(pix_count, 0, window_size * sizeof(float));
cudaMemcpy(in_x, input_x, num * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(in_y, input_y, num * sizeof(uint32_t), cudaMemcpyHostToDevice);
cudaMemcpy(in_c, input_c, num * sizeof(T), cudaMemcpyHostToDevice);
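    // Heat-map pipeline: (1) accumulate per-pixel counts, (2) scatter the counts
    // through a Gaussian kernel, (3) run two box-mean smoothing passes,
    // (4) normalize by the maximum and map each value to RGBA via ColorGradient.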
    SetCountValue_gpu<T><<<256, 1024>>>(pix_count, in_x, in_y, in_c, num, width, height);
double scale = vega_heat_map.map_scale() * 0.4;
int d = pow(2, scale);
int64_t kernel_size = d * 2 + 3;
float *kernel = (float *) malloc(kernel_size * kernel_size * sizeof(float));
guassiankernel2d(kernel, kernel_size, kernel_size, kernel_size, kernel_size);
float *dev_kernel;
cudaMalloc((void **) &dev_kernel, kernel_size * kernel_size * sizeof(float));
cudaMemcpy(dev_kernel, kernel, kernel_size * kernel_size * sizeof(float), cudaMemcpyHostToDevice);
float *dev_count;
cudaMalloc((void **) &dev_count, window_size * sizeof(float));
cudaMemset(dev_count, 0, window_size * sizeof(float));
const unsigned int blockW = 32;
const unsigned int blockH = 32;
const dim3 threadBlock(blockW, blockH);
const dim3 grid(iDivUp(width, blockW), iDivUp(height, blockH));
    HeatMapArray_gpu<<<grid, threadBlock>>>(pix_count, dev_count, dev_kernel, kernel_size, width, height);
float *color_count;
cudaMalloc((void **) &color_count, window_size * sizeof(float));
cudaMemset(color_count, 0, window_size * sizeof(float));
int64_t mean_radius = (int) (log((kernel_size - 3) / 2) / 0.4);
    MeanKernel_gpu<<<grid, threadBlock>>>(dev_count, color_count, mean_radius + 1, width, height);
    MeanKernel_gpu<<<grid, threadBlock>>>(color_count, dev_count, mean_radius / 2 + 1, width, height);
auto host_count = (float *) malloc(window_size * sizeof(float));
cudaMemcpy(host_count, dev_count, window_size * sizeof(float), cudaMemcpyDeviceToHost);
float max_pix = 0;
for (auto k = 0; k < window_size; k++) {
if (max_pix < host_count[k])
max_pix = host_count[k];
}
ColorGradient color_gradient;
color_gradient.createDefaultHeatMapGradient();
int64_t c_offset = 0;
for (auto j = 0; j < window_size; j++) {
float value = host_count[j] / max_pix;
float color_r, color_g, color_b;
color_gradient.getColorAtValue(value, color_r, color_g, color_b);
colors[c_offset++] = color_r;
colors[c_offset++] = color_g;
colors[c_offset++] = color_b;
colors[c_offset++] = value;
}
free(kernel);
free(host_count);
cudaFree(pix_count);
cudaFree(dev_kernel);
cudaFree(dev_count);
cudaFree(color_count);
cudaFree(in_x);
cudaFree(in_y);
cudaFree(in_c);
}
} //namespace render
} //namespace
#define TEMPLATE_GEN_PREFIX
#define T int8_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int16_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int32_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T int64_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint8_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint16_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint32_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T uint64_t
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T float
#include "render/2d/heatmap/set_color.inl"
#define TEMPLATE_GEN_PREFIX
#define T double
#include "render/2d/heatmap/set_color.inl"
|
c349b578cd9c98ab65ffdc295bd4d3398358ace6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief
* array_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
namespace k2 {
// See documentation in header of what this is supposed to do.
// This is similar to the template Append() defined in ops_inl.h,
// but with changes largely about adding `data_offsets`, and
// subtracting one from the dims of all but the last array.
Array1<int32_t> SpliceRowSplits(int32_t num_arrays,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_arrays, 0);
ContextPtr &c = src[0]->Context();
// row_splits_vec is the exclusive-sum of the modified dimensions of
  // the arrays in `src`.  `Modified` means: one is subtracted from the dims
  // of all but the last array.
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0, max_dim = 0;
row_splits_vec[0] = sum;
// `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a
// pointer to the last element in that array.
std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) {
K2_CHECK_GE(src[i]->Dim(), 1);
int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0);
if (dim > max_dim) max_dim = dim;
sum += dim;
row_splits_vec[i + 1] = sum;
last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
int32_t *ans_data = ans.Data();
Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec);
Array1<int32_t> data_offsets(c, num_arrays);
// note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of
// last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we
// don't need that value since we would not drop the last element of the last
// array.
ExclusiveSumDeref(last_elems_ptrs, &data_offsets);
int32_t *data_offsets_data = data_offsets.Data();
if (c->GetDeviceType() == kCpu) {
// a simple loop is faster, although the other branches should still work on
// CPU.
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t data_offset = data_offsets_data[i];
for (int32_t j = 0; j < this_dim; j++) {
ans_data[j] = this_src_data[j] + data_offset;
}
      // notice `this_dim - 1` here: it means we will overwrite the copy of the
      // last element of src[i] when copying the elements of src[i+1] in the
      // next for-loop.  It generates the same result as dropping the last
      // element of src[i], since last-element-of-src[i] == src[i+1]->Data()[0]
      // (equals 0) + data_offsets_data[i+1].
ans_data += this_dim - 1;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
const int32_t *row_splits_data = row_splits.Data();
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
int32_t avg_input_size = ans_size / num_arrays;
if (max_dim < 2 * avg_input_size + 512) {
// here, 2 is a heuristic factor. We're saying, "if the max length of any
// of the source arrays is not too much larger than the average length of
// the source arrays." The `+ 512` is an additional heuristic factor, as
// we care less about launching too many GPU threads if the number of
// elements being processed is small. What we're saying is that the
// arrays' sizes are fairly balanced, so we launch with a simple
// rectangular kernel.
K2_EVAL2(
c, num_arrays, max_dim, lambda_set_data,
(int32_t i, int32_t j)->void {
int32_t row_start = row_splits_data[i],
row_end = row_splits_data[i + 1];
const int32_t *src_ptr = src_ptrs_data[i];
            // note we have dropped the last element of src[i] in
// row_splits_data, so here it will not be copied.
if (j < row_end - row_start) {
ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i];
}
});
} else {
int32_t block_dim = 256;
while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2;
// `index_map` will map from 'new index' to 'old index', with 0 <=
// old_index < num_arrays... we handle each source array with multiple
// blocks.
// The elements of `index_map` will be of the form:
// old_index + (block_of_this_array << 32).
// where `old_index` is an index into `src` and `block_of_this_array`
// tells us which block it is, as in 0, 1, 2, 3...
// there won't be very many blocks, so it's not a problem to enumerate
// them on CPU.
std::vector<uint64_t> index_map;
index_map.reserve((2 * ans_size) / block_dim);
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_array_size = src[i]->Dim();
int32_t this_num_blocks = NumBlocks(this_array_size, block_dim);
for (int32_t j = 0; j < this_num_blocks; j++) {
index_map.push_back((static_cast<uint64_t>(j) << 32) +
static_cast<uint64_t>(i));
}
}
Array1<uint64_t> index_map_gpu(c, index_map);
const uint64_t *index_map_data = index_map_gpu.Data();
K2_EVAL2(
c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks,
(int32_t i, int32_t j) {
uint64_t index = index_map_data[i];
uint32_t orig_i = static_cast<uint32_t>(index),
block_index = static_cast<uint32_t>(index >> 32);
int32_t row_start = row_splits_data[orig_i],
row_end = row_splits_data[orig_i + 1],
orig_j = (block_index * block_dim) + j;
const int32_t *src_ptr = src_ptrs_data[orig_i];
if (orig_j < row_end - row_start) {
ans_data[row_start + orig_j] =
src_ptr[orig_j] + data_offsets_data[orig_i];
}
});
}
}
return ans;
}
bool ValidateRowIds(const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_ids.Context();
const int32_t *data = row_ids.Data();
int32_t dim = row_ids.Dim();
if (dim == 0) return true; // will treat this as valid
// note `row_ids[0]` may copy memory from device to host
if (row_ids[0] < 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_ids, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplits(const Array1<int32_t> &row_splits,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_splits.Context();
const int32_t *data = row_splits.Data();
int32_t dim = row_splits.Dim();
// must have at least one element and row_splits[0] == 0
if (dim == 0 || row_splits[0] != 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_splits, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits,
const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
  // Check if their contexts are compatible or not while getting
ContextPtr ctx = GetContext(row_splits, row_ids);
int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim();
if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false;
if (row_splits[0] != 0 || row_ids[0] < 0) return false;
if (num_elems != row_splits[num_rows]) return false;
const int32_t *row_ids_data = row_ids.Data(),
*row_splits_data = row_splits.Data();
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(ctx->IsCompatible(*temp->Context()));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp_array.Data();
K2_EVAL(
      ctx, std::max(num_elems, num_rows), lambda_check_row_ids,
(int32_t i)->void {
// check row_splits
bool invalid_splits =
(i < num_rows && row_splits_data[i] > row_splits_data[i + 1]);
// check row_ids
bool invalid_ids =
(i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]);
if (invalid_splits || invalid_ids) *temp_data = 1;
// check if row_splits and row_ids agree with each other
if (i < num_elems) {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1])
*temp_data = 1;
}
});
return (*temp)[0] == 0;
}
void RowSplitsToRowIds(const Array1<int32_t> &row_splits,
Array1<int32_t> *row_ids) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(row_splits, *row_ids);
int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
K2_CHECK_EQ(num_elems, row_splits[num_rows]);
RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data());
}
void RowIdsToRowSplits(const Array1<int32_t> &row_ids,
Array1<int32_t> *row_splits) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(*row_splits, row_ids);
int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]);
RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows,
row_splits->Data());
}
Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(n, 0);
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
const int32_t *src_data = src.Data();
Array1<int32_t> ans(c, n, 0); // init with 0
int32_t *ans_data = ans.Data();
if (n == 0) {
K2_CHECK_EQ(dim, 0);
return ans;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
for (int32_t i = 0; i < dim; ++i) {
++ans_data[src_data[i]];
}
} else {
K2_CHECK_EQ(d, kCuda);
std::size_t temp_storage_bytes = 0;
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim,
c->GetCudaStream())); // The first time is to determine temporary
// device storage requirements.
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0,
n, dim, c->GetCudaStream()));
}
return ans;
}
Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
int32_t src_dim = src.Dim();
const int32_t *src_data = src.Data();
if (src_dim == 0) {
return Array1<int32_t>(c, 0);
}
K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging
// note `src[0]` may do a DeviceToHost memory copy
int32_t ans_dim = src[0];
Array1<int32_t> ans(c, ans_dim, 0); // init with 0
int32_t *ans_data = ans.Data();
K2_EVAL(
c, src_dim, lambda_set_values, (int32_t i)->void {
K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i]));
if (i + 1 == src_dim || src_data[i + 1] < src_data[i])
ans_data[src_data[i] - 1] = i + 1;
});
MonotonicDecreasingUpperBound(ans, &ans);
return ans;
}
Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) {
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
Array1<int32_t> ans(c, dim);
const int32_t *src_data = src.Data();
int32_t *ans_data = ans.Data();
K2_EVAL(
c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; });
return ans;
}
Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) {
K2_CHECK_GT(row_splits.Dim(), 0);
ContextPtr c = row_splits.Context();
int32_t num_rows = row_splits.Dim() - 1;
Array1<int32_t> sizes(c, num_rows);
const int32_t *row_splits_data = row_splits.Data();
int32_t *sizes_data = sizes.Data();
K2_EVAL(
c, num_rows, lambda_set_sizes, (int32_t i)->void {
sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i];
});
return sizes;
}
// This is modified from RowSplitsToRowIdsKernel.
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
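// Each merge_map entry encodes (source row) + num_rows * (position within that
// row); the CPU branch of SizesToMergeMap() below produces the same encoding.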
__global__ void SizesToMergeMapKernel(int32_t num_rows,
int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems,
uint32_t *merge_map) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
#pragma unroll(4)
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
merge_map[this_row_split + thread_this_row] =
uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row);
}
Array1<uint32_t> SizesToMergeMap(ContextPtr c,
const std::vector<int32_t> &sizes) {
int32_t num_srcs = sizes.size();
ContextPtr cpu_context = GetCpuContext();
Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1);
int32_t *row_splits_cpu_data = row_splits_cpu.Data();
int32_t tot_size = 0;
row_splits_cpu_data[0] = 0;
for (int32_t i = 0; i < num_srcs; i++) {
tot_size += sizes[i];
row_splits_cpu_data[i+1] = tot_size;
}
Array1<uint32_t> ans(c, tot_size);
if (c->GetDeviceType() == kCpu) {
uint32_t *ans_data = ans.Data();
int32_t cur = 0;
for (int32_t src = 0; src < num_srcs; src++) {
int32_t begin = cur, // i.e. the previous end.
end = row_splits_cpu_data[src+1];
for (; cur != end; ++cur) {
// the 'src' says which source this item came from, and (cur - begin)
// is the position within that source.
ans_data[cur] =
uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs);
}
}
return ans;
}
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits = row_splits_cpu.To(c);
int32_t *row_splits_data = row_splits.Data();
uint32_t *merge_map_data = ans.Data();
int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_srcs * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
  K2_CUDA_SAFE_CALL(hipLaunchKernelGGL(SizesToMergeMapKernel, dim3(grid_size), dim3(block_size), 0,
                                       c->GetCudaStream(),
                                       num_srcs, threads_per_row, row_splits_data,
                                       tot_size, merge_map_data));
return ans;
}
} // namespace k2
| c349b578cd9c98ab65ffdc295bd4d3398358ace6.cu | /**
* @brief
* array_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <vector>
#include "k2/csrc/array.h"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
namespace k2 {
// See documentation in header of what this is supposed to do.
// This is similar to the template Append() defined in ops_inl.h,
// but with changes largely about adding `data_offsets`, and
// subtracting one from the dims of all but the last array.
Array1<int32_t> SpliceRowSplits(int32_t num_arrays,
const Array1<int32_t> **src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_arrays, 0);
ContextPtr &c = src[0]->Context();
// row_splits_vec is the exclusive-sum of the modified dimensions of
  // the arrays in `src`.  `Modified` means: one is subtracted from the dims
  // of all but the last array.
std::vector<int32_t> row_splits_vec(num_arrays + 1);
int32_t sum = 0, max_dim = 0;
row_splits_vec[0] = sum;
// `last_elem_ptrs_vec` contains, for each of the arrays in `num_array`, a
// pointer to the last element in that array.
std::vector<const int32_t *> last_elem_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) {
K2_CHECK_GE(src[i]->Dim(), 1);
int32_t dim = src[i]->Dim() - (i + 1 < num_arrays ? 1 : 0);
if (dim > max_dim) max_dim = dim;
sum += dim;
row_splits_vec[i + 1] = sum;
last_elem_ptrs_vec[i] = src[i]->Data() + src[i]->Dim() - 1;
}
int32_t ans_size = sum;
Array1<int32_t> ans(c, ans_size);
int32_t *ans_data = ans.Data();
Array1<const int32_t *> last_elems_ptrs(c, last_elem_ptrs_vec);
Array1<int32_t> data_offsets(c, num_arrays);
// note as data_offsets.Dim() == last_elem_ptrs.Dim(), so the last element of
// last_elem_ptrs.Dim() will not be summed to data_offsets, it's OK as we
// don't need that value since we would not drop the last element of the last
// array.
ExclusiveSumDeref(last_elems_ptrs, &data_offsets);
int32_t *data_offsets_data = data_offsets.Data();
if (c->GetDeviceType() == kCpu) {
// a simple loop is faster, although the other branches should still work on
// CPU.
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_dim = src[i]->Dim();
const int32_t *this_src_data = src[i]->Data();
int32_t data_offset = data_offsets_data[i];
for (int32_t j = 0; j < this_dim; j++) {
ans_data[j] = this_src_data[j] + data_offset;
}
      // notice `this_dim - 1` here: it means we will overwrite the copy of the
      // last element of src[i] when copying the elements of src[i+1] in the
      // next for-loop.  It generates the same result as dropping the last
      // element of src[i], since last-element-of-src[i] == src[i+1]->Data()[0]
      // (equals 0) + data_offsets_data[i+1].
ans_data += this_dim - 1;
}
} else {
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits(c, row_splits_vec);
const int32_t *row_splits_data = row_splits.Data();
std::vector<const int32_t *> src_ptrs_vec(num_arrays);
for (int32_t i = 0; i < num_arrays; i++) src_ptrs_vec[i] = src[i]->Data();
Array1<const int32_t *> src_ptrs(c, src_ptrs_vec);
const int32_t **src_ptrs_data = src_ptrs.Data();
int32_t avg_input_size = ans_size / num_arrays;
if (max_dim < 2 * avg_input_size + 512) {
// here, 2 is a heuristic factor. We're saying, "if the max length of any
// of the source arrays is not too much larger than the average length of
// the source arrays." The `+ 512` is an additional heuristic factor, as
// we care less about launching too many GPU threads if the number of
// elements being processed is small. What we're saying is that the
// arrays' sizes are fairly balanced, so we launch with a simple
// rectangular kernel.
K2_EVAL2(
c, num_arrays, max_dim, lambda_set_data,
(int32_t i, int32_t j)->void {
int32_t row_start = row_splits_data[i],
row_end = row_splits_data[i + 1];
const int32_t *src_ptr = src_ptrs_data[i];
            // note we have dropped the last element of src[i] in
// row_splits_data, so here it will not be copied.
if (j < row_end - row_start) {
ans_data[row_start + j] = src_ptr[j] + data_offsets_data[i];
}
});
} else {
int32_t block_dim = 256;
while (block_dim * 4 < avg_input_size && block_dim < 8192) block_dim *= 2;
// `index_map` will map from 'new index' to 'old index', with 0 <=
// old_index < num_arrays... we handle each source array with multiple
// blocks.
// The elements of `index_map` will be of the form:
// old_index + (block_of_this_array << 32).
// where `old_index` is an index into `src` and `block_of_this_array`
// tells us which block it is, as in 0, 1, 2, 3...
// there won't be very many blocks, so it's not a problem to enumerate
// them on CPU.
std::vector<uint64_t> index_map;
index_map.reserve((2 * ans_size) / block_dim);
for (int32_t i = 0; i < num_arrays; i++) {
int32_t this_array_size = src[i]->Dim();
int32_t this_num_blocks = NumBlocks(this_array_size, block_dim);
for (int32_t j = 0; j < this_num_blocks; j++) {
index_map.push_back((static_cast<uint64_t>(j) << 32) +
static_cast<uint64_t>(i));
}
}
Array1<uint64_t> index_map_gpu(c, index_map);
const uint64_t *index_map_data = index_map_gpu.Data();
K2_EVAL2(
c, index_map_gpu.Dim(), block_dim, lambda_set_data_blocks,
(int32_t i, int32_t j) {
uint64_t index = index_map_data[i];
uint32_t orig_i = static_cast<uint32_t>(index),
block_index = static_cast<uint32_t>(index >> 32);
int32_t row_start = row_splits_data[orig_i],
row_end = row_splits_data[orig_i + 1],
orig_j = (block_index * block_dim) + j;
const int32_t *src_ptr = src_ptrs_data[orig_i];
if (orig_j < row_end - row_start) {
ans_data[row_start + orig_j] =
src_ptr[orig_j] + data_offsets_data[orig_i];
}
});
}
}
return ans;
}
bool ValidateRowIds(const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_ids.Context();
const int32_t *data = row_ids.Data();
int32_t dim = row_ids.Dim();
if (dim == 0) return true; // will treat this as valid
// note `row_ids[0]` may copy memory from device to host
if (row_ids[0] < 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_ids, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_ids, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplits(const Array1<int32_t> &row_splits,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &ctx = row_splits.Context();
const int32_t *data = row_splits.Data();
int32_t dim = row_splits.Dim();
// must have at least one element and row_splits[0] == 0
if (dim == 0 || row_splits[0] != 0) return false;
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(IsCompatible(row_splits, *temp));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp->Data();
// Note: we know that dim >= 1 as we would have returned above if dim == 0.
// This will do nothing if (dim-1) == 0 as we have checked the first element.
K2_EVAL(
ctx, dim - 1, lambda_check_row_splits, (int32_t i)->void {
if (data[i] > data[i + 1]) *temp_data = 1; // means it's bad.
});
return (*temp)[0] == 0;
}
bool ValidateRowSplitsAndIds(const Array1<int32_t> &row_splits,
const Array1<int32_t> &row_ids,
Array1<int32_t> *temp /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
  // Check if their contexts are compatible or not while getting
ContextPtr ctx = GetContext(row_splits, row_ids);
int32_t num_rows = row_splits.Dim() - 1, num_elems = row_ids.Dim();
if (num_rows < 0 || (num_rows == 0 && num_elems > 0)) return false;
if (row_splits[0] != 0 || row_ids[0] < 0) return false;
if (num_elems != row_splits[num_rows]) return false;
const int32_t *row_ids_data = row_ids.Data(),
*row_splits_data = row_splits.Data();
Array1<int32_t> temp_array;
if (temp == nullptr || temp->Dim() == 0) {
temp_array = Array1<int32_t>(ctx, 1);
} else {
K2_CHECK(ctx->IsCompatible(*temp->Context()));
temp_array = temp->Range(0, 1);
}
temp = &temp_array;
*temp = 0;
int32_t *temp_data = temp_array.Data();
K2_EVAL(
ctx, std::max(num_elems, num_rows), lambda_check_row_ids,
(int32_t i)->void {
// check row_splits
bool invalid_splits =
(i < num_rows && row_splits_data[i] > row_splits_data[i + 1]);
// check row_ids
bool invalid_ids =
(i < (num_elems - 1) && row_ids_data[i] > row_ids_data[i + 1]);
if (invalid_splits || invalid_ids) *temp_data = 1;
// check if row_splits and row_ids agree with each other
if (i < num_elems) {
int32_t this_row = row_ids_data[i];
if (this_row < 0 || this_row >= num_rows ||
i < row_splits_data[this_row] ||
i >= row_splits_data[this_row + 1])
*temp_data = 1;
}
});
return (*temp)[0] == 0;
}
void RowSplitsToRowIds(const Array1<int32_t> &row_splits,
Array1<int32_t> *row_ids) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(row_splits, *row_ids);
int32_t num_elems = row_ids->Dim(), num_rows = row_splits.Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
K2_CHECK_EQ(num_elems, row_splits[num_rows]);
RowSplitsToRowIds(c, num_rows, row_splits.Data(), num_elems, row_ids->Data());
}
void RowIdsToRowSplits(const Array1<int32_t> &row_ids,
Array1<int32_t> *row_splits) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetContext(*row_splits, row_ids);
int32_t num_elems = row_ids.Dim(), num_rows = row_splits->Dim() - 1;
K2_CHECK_GE(num_rows, 0);
// if there are more than zero elems, there must be at least one row.
K2_CHECK(num_elems == 0 || num_rows > 0);
if (num_elems > 0) K2_CHECK_GT(num_rows, row_ids[num_elems - 1]);
RowIdsToRowSplits(c, num_elems, row_ids.Data(), false, num_rows,
row_splits->Data());
}
Array1<int32_t> GetCounts(const Array1<int32_t> &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(n, 0);
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
const int32_t *src_data = src.Data();
Array1<int32_t> ans(c, n, 0); // init with 0
int32_t *ans_data = ans.Data();
if (n == 0) {
K2_CHECK_EQ(dim, 0);
return ans;
}
DeviceType d = c->GetDeviceType();
if (d == kCpu) {
for (int32_t i = 0; i < dim; ++i) {
++ans_data[src_data[i]];
}
} else {
K2_CHECK_EQ(d, kCuda);
std::size_t temp_storage_bytes = 0;
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
nullptr, temp_storage_bytes, src_data, ans_data, n + 1, 0, n, dim,
c->GetCudaStream())); // The first time is to determine temporary
// device storage requirements.
Array1<int8_t> d_temp_storage(c, temp_storage_bytes);
K2_CHECK_CUDA_ERROR(cub::DeviceHistogram::HistogramEven(
d_temp_storage.Data(), temp_storage_bytes, src_data, ans_data, n + 1, 0,
n, dim, c->GetCudaStream()));
}
return ans;
}
Array1<int32_t> InvertMonotonicDecreasing(const Array1<int32_t> &src) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
int32_t src_dim = src.Dim();
const int32_t *src_data = src.Data();
if (src_dim == 0) {
return Array1<int32_t>(c, 0);
}
K2_DCHECK_GT(src.Back(), 0); // just call Back when debugging
// note `src[0]` may do a DeviceToHost memory copy
int32_t ans_dim = src[0];
Array1<int32_t> ans(c, ans_dim, 0); // init with 0
int32_t *ans_data = ans.Data();
K2_EVAL(
c, src_dim, lambda_set_values, (int32_t i)->void {
K2_DCHECK((i + 1 == src_dim || src_data[i + 1] <= src_data[i]));
if (i + 1 == src_dim || src_data[i + 1] < src_data[i])
ans_data[src_data[i] - 1] = i + 1;
});
MonotonicDecreasingUpperBound(ans, &ans);
return ans;
}
Array1<int32_t> InvertPermutation(const Array1<int32_t> &src) {
ContextPtr &c = src.Context();
int32_t dim = src.Dim();
Array1<int32_t> ans(c, dim);
const int32_t *src_data = src.Data();
int32_t *ans_data = ans.Data();
K2_EVAL(
c, dim, lambda_set_ans, (int32_t i)->void { ans_data[src_data[i]] = i; });
return ans;
}
Array1<int32_t> RowSplitsToSizes(const Array1<int32_t> &row_splits) {
K2_CHECK_GT(row_splits.Dim(), 0);
ContextPtr c = row_splits.Context();
int32_t num_rows = row_splits.Dim() - 1;
Array1<int32_t> sizes(c, num_rows);
const int32_t *row_splits_data = row_splits.Data();
int32_t *sizes_data = sizes.Data();
K2_EVAL(
c, num_rows, lambda_set_sizes, (int32_t i)->void {
sizes_data[i] = row_splits_data[i + 1] - row_splits_data[i];
});
return sizes;
}
// This is modified from RowSplitsToRowIdsKernel.
// When we invoke this we make a big enough grid that there doesn't have to
// be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >=
// num_rows
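// Each merge_map entry encodes (source row) + num_rows * (position within that
// row); the CPU branch of SizesToMergeMap() below produces the same encoding.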
__global__ void SizesToMergeMapKernel(int32_t num_rows,
int32_t threads_per_row,
const int32_t *row_splits,
int32_t num_elems,
uint32_t *merge_map) {
int32_t thread = blockIdx.x * blockDim.x + threadIdx.x,
num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row,
thread_this_row = thread % threads_per_row;
if (row >= num_rows) return;
K2_CHECK_GE(num_threads / threads_per_row, num_rows);
int32_t this_row_split = row_splits[row],
next_row_split = row_splits[row + 1],
row_length = next_row_split - this_row_split;
#pragma unroll(4)
for (; thread_this_row < row_length; thread_this_row += threads_per_row)
merge_map[this_row_split + thread_this_row] =
uint32_t(row) + uint32_t(num_rows) * uint32_t(thread_this_row);
}
Array1<uint32_t> SizesToMergeMap(ContextPtr c,
const std::vector<int32_t> &sizes) {
int32_t num_srcs = sizes.size();
ContextPtr cpu_context = GetCpuContext();
Array1<int32_t> row_splits_cpu(cpu_context, num_srcs + 1);
int32_t *row_splits_cpu_data = row_splits_cpu.Data();
int32_t tot_size = 0;
row_splits_cpu_data[0] = 0;
for (int32_t i = 0; i < num_srcs; i++) {
tot_size += sizes[i];
row_splits_cpu_data[i+1] = tot_size;
}
Array1<uint32_t> ans(c, tot_size);
if (c->GetDeviceType() == kCpu) {
uint32_t *ans_data = ans.Data();
int32_t cur = 0;
for (int32_t src = 0; src < num_srcs; src++) {
int32_t begin = cur, // i.e. the previous end.
end = row_splits_cpu_data[src+1];
for (; cur != end; ++cur) {
// the 'src' says which source this item came from, and (cur - begin)
// is the position within that source.
ans_data[cur] =
uint32_t(src) + uint32_t(cur - begin) * uint32_t(num_srcs);
}
}
return ans;
}
K2_CHECK_EQ(c->GetDeviceType(), kCuda);
Array1<int32_t> row_splits = row_splits_cpu.To(c);
int32_t *row_splits_data = row_splits.Data();
uint32_t *merge_map_data = ans.Data();
int32_t avg_elems_per_row = (tot_size + num_srcs - 1) / num_srcs,
threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row),
tot_threads = num_srcs * threads_per_row;
int32_t block_size = 256;
int32_t grid_size = NumBlocks(tot_threads, block_size);
K2_CUDA_SAFE_CALL(SizesToMergeMapKernel<<<grid_size, block_size, 0,
c->GetCudaStream()>>>(
num_srcs, threads_per_row, row_splits_data,
tot_size, merge_map_data));
return ans;
}
} // namespace k2
|
fd663fee47fae249f2a34d8cee1591e9d815779f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y :%d \n",
blockIdx.x, blockIdx.y, blockIdx.z,blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
//int main()
//{
// int nx, ny;
// nx = 16;
// ny = 16;
//
// dim3 block(8, 8);
// dim3 grid(nx / block.x, ny / block.y);
//
// print_details << <grid, block >> > ();
// hipDeviceSynchronize();
//
// hipDeviceReset();
// return 0;
//}
| fd663fee47fae249f2a34d8cee1591e9d815779f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y :%d \n",
blockIdx.x, blockIdx.y, blockIdx.z,blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
//int main()
//{
// int nx, ny;
// nx = 16;
// ny = 16;
//
// dim3 block(8, 8);
// dim3 grid(nx / block.x, ny / block.y);
//
// print_details << <grid, block >> > ();
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//}
|
68f4bee184b2131cbddb91416c47c68c8a1d16f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "jacketSDK.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#define SPDS 15
#define TPB 64
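// pc_D3Q15_Reg: per-node boundary handling for a D3Q15 lattice-Boltzmann grid.
// It recomputes the macroscopic density and velocity, zeroes the velocity on
// solid nodes (snl), applies the prescribed-velocity (vw_nl/vw_uz) and
// prescribed-pressure (pe_nl/rho_out) conditions via non-equilibrium
// bounce-back, regularizes the boundary-node distributions, and writes the
// results to fEq (and to fIn for boundary nodes).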
__global__ void pc_D3Q15_Reg(float * fIn, float * fEq, float * rho_d,
float * ux_d, float * uy_d, float * uz_d,
const int * vw_nl, const float * vw_uz,
const int * pe_nl, const float rho_out,
const int * snl, const int nnodes){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
//load density distribution data
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14;
f0 = fIn[tid]; f1=fIn[nnodes+tid];
f2 = fIn[2*nnodes+tid]; f3 = fIn[3*nnodes+tid];
f4 = fIn[4*nnodes+tid]; f5 = fIn[5*nnodes+tid];
f6 = fIn[6*nnodes+tid]; f7 = fIn[7*nnodes+tid];
f8 = fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12 = fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid];
float ux,uy,uz,rho;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;
ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux = ux/rho;
uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy = uy/rho;
uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz = uz/rho;
//detect boundary nodes and compute their macroscopic properties.
if(snl[tid]==1){
ux=0;uy=0;uz=0;
ux_d[tid]=0.; uy_d[tid]=0.; uz_d[tid]=0.;
}
if(vw_nl[tid]==1){
ux=0;uy=0; uz=vw_uz[tid];
ux_d[tid]=0.; uy_d[tid]=0.; uz_d[tid]=uz;
//set rho based on uz
rho = 1./(1.-uz)*(2.*(f6+f11+f12+f13+f14)+(f0+f1+f2+f3+f4));
rho_d[tid]=rho;//update global array
}
if(pe_nl[tid]==1){
rho=rho_out; ux=0.; uy=0.;
rho_d[tid]=rho; ux_d[tid]=0.; uy_d[tid]=0.;
//set uz based on rho...
uz=-1.+((2.*(f5+f7+f8+f9+f10)+(f0+f1+f2+f3+f4)))/rho_out;
uz_d[tid]=uz;//update global array
}
float cu;
//if it's a boundary node, I compute fEq for everyone so I can
//store it and manipulate it for the boundary condition.
//if it's a solid node or an interior node, I just compute fEq and store
//it to its global value...
if((vw_nl[tid]==1) | (pe_nl[tid]==1)){
float fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14;
float ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14;
float w;
//speed 0 ex=ey=ez=0 w=2./9.
fe0=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
fEq[tid]=fe0;
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fe1=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[nnodes+tid]=fe1;
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fe2=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[2*nnodes+tid]=fe2;
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fe3=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[3*nnodes+tid]=fe3;
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fe4=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[4*nnodes+tid]=fe4;
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fe5=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[5*nnodes+tid]=fe5;
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fe6=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[6*nnodes+tid]=fe6;
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fe7=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[7*nnodes+tid]=fe7;
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fe8=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[8*nnodes+tid]=fe8;
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fe9=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[9*nnodes+tid]=fe9;
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fe10=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[10*nnodes+tid]=fe10;
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fe11=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[11*nnodes+tid]=fe11;
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fe12=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[12*nnodes+tid]=fe12;
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fe13=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[13*nnodes+tid]=fe13;
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fe14=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[14*nnodes+tid]=fe14;
if(vw_nl[tid]==1){
//take actions for the west velocity node list
//adjust fIn for the unknown velocities: 5,7,8,9,10
//bounce-back of non-equilibrium parts
//f5, bb_spd=f6
f5=fe5+(f6-fe6);
//f7, bb_spd=f14
f7=fe7+(f14-fe14);
//f8, bb_spd=f13
f8=fe8+(f13-fe13);
//f9, bb_spd=f12
f9=fe9+(f12-fe12);
//f10, bb_spd=f11
f10=fe10+(f11-fe11);
}else{
//take actions for the east pressure node list
f6=fe6+(f5-fe5);
f11=f11+(f10-fe10);
f12=f12+(f9-fe9);
f13=f13+(f8-fe8);
f14=f14+(f7-fe7);
}
//get the non-equilibrium part of each speed
//ft0=f0-fe0;
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
//now, multiply by f# = ((ft#)*Q_flat)*Q_flat'
f0=0;
f1=ft1+ft2+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f2=f1;
f3=ft3+ft4+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f4=f3;
f5=ft5+ft6+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f6=f5;
f7=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
f8=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f9=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f10=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f11=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f12=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f13=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f14=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
//f#=f#*(9/2)*w#
//f0, still equals 0..
cu = 9./2.; w = 1./9.;
//fIn[..] = fe#+f#
fIn[tid]=fe0;
fIn[nnodes+tid]=fe1+f1*(cu)*w;
fIn[2*nnodes+tid]=fe2+f2*(cu)*w;
fIn[3*nnodes+tid]=fe3+f3*cu*w;
fIn[4*nnodes+tid]=fe4+f4*cu*w;
fIn[5*nnodes+tid]=fe5+f5*cu*w;
fIn[6*nnodes+tid]=fe6+f6*cu*w;
w = 1./72.;
fIn[7*nnodes+tid]=fe7+f7*cu*w;
fIn[8*nnodes+tid]=fe8+f8*cu*w;
fIn[9*nnodes+tid]=fe9+f9*cu*w;
fIn[10*nnodes+tid]=fe10+f10*cu*w;
fIn[11*nnodes+tid]=fe11+f11*cu*w;
fIn[12*nnodes+tid]=fe12+f12*cu*w;
fIn[13*nnodes+tid]=fe13+f13*cu*w;
fIn[14*nnodes+tid]=fe14+f14*cu*w;
}else{
//speed 0 ex=ey=ez=0, w= 2./9.
fEq[tid]=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fEq[nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fEq[2*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fEq[3*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fEq[4*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fEq[5*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fEq[6*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fEq[7*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fEq[8*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fEq[9*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fEq[10*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fEq[11*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fEq[12*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fEq[13*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fEq[14*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
}
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray *prhs[]){
if(nrhs!=12)
return err("Usage: pc_ch3D_D3Q15_Reg(fIn,fEq,rho,ux,uy,uz,vw_nl,vw_uz,pe_nl,rho_out,snl,nnodes)");
mxArray * m_fIn = prhs[0];
mxArray * m_fEq = prhs[1];
mxArray * m_rho = prhs[2];
mxArray * m_ux = prhs[3];
mxArray * m_uy = prhs[4];
mxArray * m_uz = prhs[5];
mxArray * m_vw_nl = prhs[6];
mxArray * m_vw_uz = prhs[7];
mxArray * m_pe_nl = prhs[8];
float rho_out = mxGetScalar(prhs[9]);
mxArray * m_snl = prhs[10];
int nnodes = mxGetScalar(prhs[11]);
float * fIn;
float * fEq;
float * rho;
float * ux;
float * uy;
float * uz;
int * vw_nl;
float * vw_uz;
int * pe_nl;
int * snl;
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&fEq,m_fEq);
jkt_mem((void**)&rho,m_rho);
jkt_mem((void**)&ux,m_ux);
jkt_mem((void**)&uy,m_uy);
jkt_mem((void**)&uz,m_uz);
jkt_mem((void**)&vw_nl,m_vw_nl);
jkt_mem((void**)&vw_uz,m_vw_uz);
jkt_mem((void**)&pe_nl,m_pe_nl);
jkt_mem((void**)&snl,m_snl);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
hipLaunchKernelGGL(( pc_D3Q15_Reg), dim3(GRIDS),dim3(BLOCKS), 0, 0, fIn,fEq,rho,ux,uy,uz,vw_nl,vw_uz,pe_nl,
rho_out,snl,nnodes);
return errNone;
}
| 68f4bee184b2131cbddb91416c47c68c8a1d16f0.cu | #include "jacketSDK.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "device_functions.h"
#define SPDS 15
#define TPB 64
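// pc_D3Q15_Reg: per-node boundary handling for a D3Q15 lattice-Boltzmann grid.
// It recomputes the macroscopic density and velocity, zeroes the velocity on
// solid nodes (snl), applies the prescribed-velocity (vw_nl/vw_uz) and
// prescribed-pressure (pe_nl/rho_out) conditions via non-equilibrium
// bounce-back, regularizes the boundary-node distributions, and writes the
// results to fEq (and to fIn for boundary nodes).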
__global__ void pc_D3Q15_Reg(float * fIn, float * fEq, float * rho_d,
float * ux_d, float * uy_d, float * uz_d,
const int * vw_nl, const float * vw_uz,
const int * pe_nl, const float rho_out,
const int * snl, const int nnodes){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
//load density distribution data
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14;
f0 = fIn[tid]; f1=fIn[nnodes+tid];
f2 = fIn[2*nnodes+tid]; f3 = fIn[3*nnodes+tid];
f4 = fIn[4*nnodes+tid]; f5 = fIn[5*nnodes+tid];
f6 = fIn[6*nnodes+tid]; f7 = fIn[7*nnodes+tid];
f8 = fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12 = fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid];
float ux,uy,uz,rho;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14;
ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux = ux/rho;
uy=f3-f4+f7+f8-f9-f10+f11+f12-f13-f14; uy = uy/rho;
uz=f5-f6+f7+f8+f9+f10-f11-f12-f13-f14; uz = uz/rho;
//detect boundary nodes and compute their macroscopic properties.
if(snl[tid]==1){
ux=0;uy=0;uz=0;
ux_d[tid]=0.; uy_d[tid]=0.; uz_d[tid]=0.;
}
if(vw_nl[tid]==1){
ux=0;uy=0; uz=vw_uz[tid];
ux_d[tid]=0.; uy_d[tid]=0.; uz_d[tid]=uz;
//set rho based on uz
rho = 1./(1.-uz)*(2.*(f6+f11+f12+f13+f14)+(f0+f1+f2+f3+f4));
rho_d[tid]=rho;//update global array
}
if(pe_nl[tid]==1){
rho=rho_out; ux=0.; uy=0.;
rho_d[tid]=rho; ux_d[tid]=0.; uy_d[tid]=0.;
//set uz based on rho...
uz=-1.+((2.*(f5+f7+f8+f9+f10)+(f0+f1+f2+f3+f4)))/rho_out;
uz_d[tid]=uz;//update global array
}
float cu;
//if it's a boundary node, I compute fEq for everyone so I can
//store it and manipulate it for the boundary condition.
//if it's a solid node or an interior node, I just compute fEq and store
//it to its global value...
if((vw_nl[tid]==1) | (pe_nl[tid]==1)){
float fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14;
float ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14;
float w;
//speed 0 ex=ey=ez=0 w=2./9.
fe0=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
fEq[tid]=fe0;
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fe1=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[nnodes+tid]=fe1;
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fe2=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[2*nnodes+tid]=fe2;
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fe3=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[3*nnodes+tid]=fe3;
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fe4=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[4*nnodes+tid]=fe4;
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fe5=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[5*nnodes+tid]=fe5;
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fe6=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[6*nnodes+tid]=fe6;
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fe7=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[7*nnodes+tid]=fe7;
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fe8=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[8*nnodes+tid]=fe8;
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fe9=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[9*nnodes+tid]=fe9;
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fe10=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[10*nnodes+tid]=fe10;
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fe11=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[11*nnodes+tid]=fe11;
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fe12=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[12*nnodes+tid]=fe12;
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fe13=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[13*nnodes+tid]=fe13;
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fe14=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
fEq[14*nnodes+tid]=fe14;
if(vw_nl[tid]==1){
//take actions for the west velocity node list
//adjust fIn for the unknown velocities: 5,7,8,9,10
//bounce-back of non-equilibrium parts
//f5, bb_spd=f6
f5=fe5+(f6-fe6);
//f7, bb_spd=f14
f7=fe7+(f14-fe14);
//f8, bb_spd=f13
f8=fe8+(f13-fe13);
//f9, bb_spd=f12
f9=fe9+(f12-fe12);
//f10, bb_spd=f11
f10=fe10+(f11-fe11);
}else{
//take actions for the east pressure node list
f6=fe6+(f5-fe5);
f11=f11+(f10-fe10);
f12=f12+(f9-fe9);
f13=f13+(f8-fe8);
f14=f14+(f7-fe7);
}
//get the non-equilibrium part of each speed
//ft0=f0-fe0;
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
//now, multiply by f# = ((ft#)*Q_flat)*Q_flat'
f0=0;
f1=ft1+ft2+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f2=f1;
f3=ft3+ft4+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f4=f3;
f5=ft5+ft6+ft7+ft8+ft9+ft10+ft11+ft12+ft13+ft14;
f6=f5;
f7=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
f8=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f9=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f10=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f11=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+ft9+9.*ft10+9.*ft11+ft12+ft13+ft14;
f12=ft1+ft2+ft3+ft4+ft5+ft6+ft7+ft8+9.*ft9+ft10+ft11+9.*ft12+ft13+ft14;
f13=ft1+ft2+ft3+ft4+ft5+ft6+ft7+9.*ft8+ft9+ft10+ft11+ft12+9.*ft13+ft14;
f14=ft1+ft2+ft3+ft4+ft5+ft6+9.*ft7+ft8+ft9+ft10+ft11+ft12+ft13+9.*ft14;
//f#=f#*(9/2)*w#
//f0, still equals 0..
cu = 9./2.; w = 1./9.;
//fIn[..] = fe#+f#
fIn[tid]=fe0;
fIn[nnodes+tid]=fe1+f1*(cu)*w;
fIn[2*nnodes+tid]=fe2+f2*(cu)*w;
fIn[3*nnodes+tid]=fe3+f3*cu*w;
fIn[4*nnodes+tid]=fe4+f4*cu*w;
fIn[5*nnodes+tid]=fe5+f5*cu*w;
fIn[6*nnodes+tid]=fe6+f6*cu*w;
w = 1./72.;
fIn[7*nnodes+tid]=fe7+f7*cu*w;
fIn[8*nnodes+tid]=fe8+f8*cu*w;
fIn[9*nnodes+tid]=fe9+f9*cu*w;
fIn[10*nnodes+tid]=fe10+f10*cu*w;
fIn[11*nnodes+tid]=fe11+f11*cu*w;
fIn[12*nnodes+tid]=fe12+f12*cu*w;
fIn[13*nnodes+tid]=fe13+f13*cu*w;
fIn[14*nnodes+tid]=fe14+f14*cu*w;
}else{
//speed 0 ex=ey=ez=0, w= 2./9.
fEq[tid]=rho*(2./9.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1 ex=1 ey=ez=0 w=1./9.
cu=3.*(1.*ux);
fEq[nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 2 ex=-1 ey=ez=0 w=1./9.
cu=3.*((-1.)*ux);
fEq[2*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 ex=0 ey=1 ez=0 w=1./9.
cu=3.*(1.*uy);
fEq[3*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 ex=0 ey=-1 ez=0 w=1./9.
cu=3.*(-1.*uy);
fEq[4*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 ex=ey=0 ez=1 w=1./9.
cu=3.*(1.*uz);
fEq[5*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 ex=ey=0 ez=-1 w=1./9.
cu=3.*(-1.*uz);
fEq[6*nnodes+tid]=rho*(1./9.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 ex=ey=ez=1 w=1./72.
cu=3.*(ux+uy+uz);
fEq[7*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 ex=-1 ey=ez=1 w=1./72.
cu=3.*(-ux+uy+uz);
fEq[8*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 ex=1 ey=-1 ez=1 w=1./72.
cu=3.*(ux-uy+uz);
fEq[9*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 ex=-1 ey=-1 ez=1 w=1/72
cu=3.*(-ux-uy+uz);
fEq[10*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 ex=1 ey=1 ez=-1 w=1/72
cu=3.*(ux+uy-uz);
fEq[11*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 ex=-1 ey=1 ez=-1 w=1/72
cu=3.*(-ux+uy-uz);
fEq[12*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 ex=1 ey=ez=-1 w=1/72
cu=3.*(ux-uy-uz);
fEq[13*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 ex=ey=ez=-1 w=1/72
cu=3.*(-ux-uy-uz);
fEq[14*nnodes+tid]=rho*(1./72.)*(1.+cu+0.5*(cu*cu)-
1.5*(ux*ux+uy*uy+uz*uz));
}
}
}
err_t jktFunction(int nlhs, mxArray * plhs[], int nrhs, mxArray *prhs[]){
if(nrhs!=12)
return err("Usage: pc_ch3D_D3Q15_Reg(fIn,fEq,rho,ux,uy,uz,vw_nl,vw_uz,pe_nl,rho_out,snl,nnodes)");
mxArray * m_fIn = prhs[0];
mxArray * m_fEq = prhs[1];
mxArray * m_rho = prhs[2];
mxArray * m_ux = prhs[3];
mxArray * m_uy = prhs[4];
mxArray * m_uz = prhs[5];
mxArray * m_vw_nl = prhs[6];
mxArray * m_vw_uz = prhs[7];
mxArray * m_pe_nl = prhs[8];
float rho_out = mxGetScalar(prhs[9]);
mxArray * m_snl = prhs[10];
int nnodes = mxGetScalar(prhs[11]);
float * fIn;
float * fEq;
float * rho;
float * ux;
float * uy;
float * uz;
int * vw_nl;
float * vw_uz;
int * pe_nl;
int * snl;
jkt_mem((void**)&fIn,m_fIn);
jkt_mem((void**)&fEq,m_fEq);
jkt_mem((void**)&rho,m_rho);
jkt_mem((void**)&ux,m_ux);
jkt_mem((void**)&uy,m_uy);
jkt_mem((void**)&uz,m_uz);
jkt_mem((void**)&vw_nl,m_vw_nl);
jkt_mem((void**)&vw_uz,m_vw_uz);
jkt_mem((void**)&pe_nl,m_pe_nl);
jkt_mem((void**)&snl,m_snl);
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((nnodes+TPB-1)/TPB,1,1);
pc_D3Q15_Reg<<<GRIDS,BLOCKS>>>(fIn,fEq,rho,ux,uy,uz,vw_nl,vw_uz,pe_nl,
rho_out,snl,nnodes);
return errNone;
}
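/* [Editor's addition - illustrative sketch, not part of the original module.]
 * CPU reference for the D3Q15 equilibrium distribution that the else-branch of
 * the kernel above computes per lattice node. The lattice vectors and weights
 * follow the comments in the kernel (w0 = 2/9, w1..w6 = 1/9, w7..w14 = 1/72);
 * the function name and the plain-array interface are assumptions made only
 * for this sketch, e.g. to spot-check the GPU result on a handful of nodes. */
static void d3q15_equilibrium_ref(float rho, float ux, float uy, float uz, float fEq[15]) {
  static const int ex[15] = {0, 1, -1, 0, 0, 0, 0, 1, -1, 1, -1, 1, -1, 1, -1};
  static const int ey[15] = {0, 0, 0, 1, -1, 0, 0, 1, 1, -1, -1, 1, 1, -1, -1};
  static const int ez[15] = {0, 0, 0, 0, 0, 1, -1, 1, 1, 1, 1, -1, -1, -1, -1};
  static const float w[15] = {2.f / 9.f,
                              1.f / 9.f, 1.f / 9.f, 1.f / 9.f, 1.f / 9.f, 1.f / 9.f, 1.f / 9.f,
                              1.f / 72.f, 1.f / 72.f, 1.f / 72.f, 1.f / 72.f,
                              1.f / 72.f, 1.f / 72.f, 1.f / 72.f, 1.f / 72.f};
  const float usq = ux * ux + uy * uy + uz * uz;
  for (int i = 0; i < 15; i++) {
    const float cu = 3.f * (ex[i] * ux + ey[i] * uy + ez[i] * uz); /* 3*(e_i . u) */
    fEq[i] = rho * w[i] * (1.f + cu + 0.5f * cu * cu - 1.5f * usq);
  }
}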
|
4701e12e787937eb4d54e02d9ffbc5c726986cb5.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by depaulsmiller on 1/15/21.
//
#include <unistd.h>
#include "helper.cuh"
#include <algorithm>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <dlfcn.h>
namespace pt = boost::property_tree;
using BatchWrapper = std::vector<RequestWrapper<unsigned long long, data_t *>>;
//#ifdef MODEL_CHANGE
using Model = kvgpu::AnalyticalModel<unsigned long long>;
//#else
//using Model = kvgpu::SimplModel<unsigned long long>;
//#endif
using RB = std::shared_ptr<Communication>;
int totalBatches = 10000;
int BATCHSIZE = 512;
int NUM_THREADS = 12;//std::thread::hardware_concurrency() - 10;
void usage(char *command);
struct ServerConf {
int threads;
int cpu_threads;
int gpus;
int streams;
std::string modelFile;
bool train;
int size;
int batchSize;
bool cache;
ServerConf() {
batchSize = BATCHSIZE;
modelFile = "";
cpu_threads = NUM_THREADS;
threads = 2;//1;//4;
gpus = 1;
streams = 10;//10;
size = 1000000;
train = false;
cache = true;
}
explicit ServerConf(const std::string &filename) {
pt::ptree root;
pt::read_json(filename, root);
cpu_threads = root.get<int>("cpu_threads", NUM_THREADS);
threads = root.get<int>("threads", 4);
streams = root.get<int>("streams", 2);
gpus = root.get<int>("gpus", 2);
modelFile = root.get<std::string>("modelFile", "");
train = root.get<bool>("train", false);
size = root.get<int>("size", 1000000);
batchSize = root.get<int>("batchSize", BATCHSIZE);
cache = root.get<bool>("cache", true);
}
void persist(const std::string &filename) const {
pt::ptree root;
root.put("threads", threads);
root.put("streams", streams);
root.put("gpus", gpus);
root.put("modelFile", modelFile);
root.put("train", train);
root.put("size", size);
root.put("batchSize", batchSize);
root.put("cache", cache);
pt::write_json(filename, root);
}
~ServerConf() = default;
};
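/* [Editor's note - example of the JSON configuration read by ServerConf(const std::string &)
 * above and selected with the -f flag in main() below. The values simply mirror the default
 * constructor and are illustrative, not a recommended tuning.]
 *
 *   {
 *     "cpu_threads": 12,
 *     "threads": 2,
 *     "streams": 10,
 *     "gpus": 1,
 *     "modelFile": "",
 *     "train": false,
 *     "size": 1000000,
 *     "batchSize": 512,
 *     "cache": true
 *   }
 */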
int main(int argc, char **argv) {
ServerConf sconf;
bool workloadFilenameSet = false;
std::string workloadFilename;
#ifdef MODEL_CHANGE
std::string dllib = "./libzipfianWorkloadSwitch.so";
#else
std::string dllib = "./libzipfianWorkload.so";
#endif
    int c; // getopt() returns int; a plain char may be unsigned and never compare equal to -1
while ((c = getopt(argc, argv, "f:w:l:")) != -1) {
switch (c) {
case 'f':
sconf = ServerConf(std::string(optarg));
// optarg is the file
break;
case 'w':
workloadFilenameSet = true;
workloadFilename = optarg;
break;
case 'l':
dllib = optarg;
break;
default:
case '?':
usage(argv[0]);
return 1;
}
}
void (*initWorkload)() = nullptr;
void (*initWorkloadFile)(std::string) = nullptr;
BatchWrapper (*generateWorkloadBatch)(unsigned int *, unsigned) = nullptr;
int (*getBatchesToRun)() = nullptr;
std::vector<BatchWrapper> (*getPopulationBatches)(unsigned int *, unsigned) = nullptr;
auto handler = dlopen(dllib.c_str(), RTLD_LAZY);
if (!handler) {
std::cerr << dlerror() << std::endl;
return 1;
}
initWorkload = (void (*)()) dlsym(handler, "initWorkload");
initWorkloadFile = (void (*)(std::string)) dlsym(handler, "initWorkloadFile");
generateWorkloadBatch = (BatchWrapper(*)(unsigned *, unsigned)) dlsym(handler, "generateWorkloadBatch");
getBatchesToRun = (int (*)()) dlsym(handler, "getBatchesToRun");
getPopulationBatches = (std::vector<BatchWrapper> (*)(unsigned int *, unsigned)) dlsym(handler,
"getPopulationBatches");
#ifdef MODEL_CHANGE
auto workloadSwitch = (void (*)()) dlsym(handler, "changeWorkload");
#endif
if (workloadFilenameSet) {
initWorkloadFile(workloadFilename);
} else {
initWorkload();
}
totalBatches = getBatchesToRun();
std::vector<PartitionedSlabUnifiedConfig> conf;
for (int i = 0; i < sconf.gpus; i++) {
for (int j = 0; j < sconf.streams; j++) {
gpuErrchk(hipSetDevice(i));
hipStream_t stream = hipStreamDefault;
if (j != 0) {
gpuErrchk(hipStreamCreate(&stream));
}
conf.push_back({sconf.size, i, stream});
}
}
std::unique_ptr<KVStoreCtx<Model>> ctx = nullptr;
if (sconf.modelFile != "") {
ctx = std::make_unique<KVStoreCtx<Model>>(conf, sconf.cpu_threads, sconf.modelFile);
} else {
//#ifdef MODEL_CHANGE
unsigned tseed = time(nullptr);
std::vector<std::pair<unsigned long long, unsigned>> trainVec;
std::hash<unsigned long long> hfn{};
for (int i = 0; i < 10000; i++) {
BatchWrapper b = generateWorkloadBatch(&tseed, sconf.batchSize);
for (auto &elm : b) {
trainVec.push_back({elm.key, hfn(elm.key)});
}
}
Model m;
m.train(trainVec);
m.persist("./temp.json");
#ifdef MODEL_CHANGE
workloadSwitch();
#endif
ctx = std::make_unique<KVStoreCtx<Model>>(conf, sconf.cpu_threads, m);
//#else
// ctx = std::make_unique<KVStoreCtx<unsigned long long, data_t, Model>>(conf, sconf.cpu_threads);
//#endif
}
GeneralClient<Model> *client = nullptr;
if (sconf.cache) {
if (sconf.gpus == 0) {
client = new JustCacheKVStoreClient<Model>(*ctx);
} else {
client = new KVStoreClient<Model>(*ctx);
}
} else {
client = new NoCacheKVStoreClient<Model>(*ctx);
}
unsigned popSeed = time(nullptr);
auto pop = getPopulationBatches(&popSeed, BATCHSIZE);
for (auto &b : pop) {
bool retry;
int size = b.size();
do {
retry = false;
loadBalanceSet = true;
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
auto start = std::chrono::high_resolution_clock::now();
client->batch(b, rb, start);
int count = 0;
do {
Response response;
if (rb->try_recv(response)) {
count++;
if (response.retry) {
retry = true;
}
}
} while (count < size);
} while (retry);
}
std::cerr << "Populated" << std::endl;
client->resetStats();
std::vector<std::thread> threads;
auto *q = new tbb::concurrent_queue<std::pair<BatchWrapper, RB>>[sconf.threads];
std::atomic_bool reclaim{false};
std::atomic_bool changing{false};
auto *block = new block_t(sconf.threads);
for (int i = 0; i < sconf.threads; ++i) {
threads.push_back(std::thread([&client, &reclaim, &q, &changing, &block, sconf](int tid) {
init_loadbalance(sconf.cpu_threads);
std::shared_ptr<Communication> lastResBuf = nullptr;
while (!reclaim) {
std::pair<BatchWrapper, RB> p;
if (changing) {
block->wait();
while (changing) {
if (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
client->batch_drop_modifications(p.first, p.second, start);
}
}
}
if (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
lastResBuf = p.second;
client->batch(p.first, p.second, start);
}
}
std::pair<BatchWrapper, RB> p;
while (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
lastResBuf = p.second;
client->batch(p.first, p.second, start);
}
int count = 0;
do {
Response response;
if (lastResBuf->try_recv(response)) {
count++;
}
} while (count < sconf.batchSize);
}, i));
}
auto startTime = std::chrono::high_resolution_clock::now();
std::vector<std::thread> threads2;
int clients = 8;
#ifdef MODEL_CHANGE
std::atomic_bool finishBatching;
finishBatching = false;
auto fn = [&finishBatching, clients, &q, &sconf, generateWorkloadBatch, &changing, &block, &client](int tid) {
unsigned tseed = time(nullptr);
int i = 0;
while (!finishBatching) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = Model(18000);
double time;
client->change_model(changing, tmp, block, time);
std::cerr << "Changed " << time * 1e3 << "\n";
}*/
auto rb = std::make_shared<ResultsBuffers<data_t>>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
i++;
}
};
#else
auto fn = [clients, &q, &sconf, generateWorkloadBatch, &changing, &block, &client](int tid) {
unsigned tseed = time(nullptr);
for (int i = 0; i < totalBatches / clients; i++) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = Model(18000);
double time;
client->change_model(changing, tmp, block, time);
std::cerr << "Changed " << time * 1e3 << "\n";
}*/
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
}
if (tid == 0) {
for (int i = 0; i < totalBatches - (totalBatches / clients) * clients; i++) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = kvgpu::SimplModel<unsigned>(18000);
changing = true;
client->change_model(tmp, block);
changing = false;
std::cerr << "Changed\n";
}*/
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
}
}
};
#endif
for (int j = 0; j < clients; j++) {
threads2.push_back(std::thread(fn, j));
}
#ifdef MODEL_CHANGE
std::cerr << "Sleep" << std::endl;
sleep(1);
//workloadSwitch();
//sleep(1);
std::vector<std::pair<unsigned long long, unsigned>> trainVec;
unsigned threadSeeed = time(nullptr);
for (int i = 0; i < 10000; i++) {
BatchWrapper b = generateWorkloadBatch(&threadSeeed, sconf.batchSize);
std::hash<unsigned long long> hfn{};
for (auto &elm : b) {
trainVec.push_back({elm.key, hfn(elm.key)});
}
}
Model m;
m.train(trainVec);
m.persist("temp2.json");
std::cerr << "Changing\n";
double modelchange_time;
auto changeTime = std::chrono::high_resolution_clock::now();
client->change_model(changing, m, block, modelchange_time);
std::cerr << "Changed " << modelchange_time * 1e3 << "\n";
sleep(5);
finishBatching = true;
#endif
for (auto &t : threads2) {
t.join();
}
auto endTimeArrival = std::chrono::high_resolution_clock::now();
reclaim = true;
std::cerr << "Awake and joining\n";
for (auto &t : threads) {
t.join();
}
auto times = client->getCacheTimes();
auto endTime = std::chrono::high_resolution_clock::now();
size_t ops = client->getOps();
std::sort(times.begin(), times.end(),
[](const std::pair<std::chrono::high_resolution_clock::time_point, std::vector<std::chrono::high_resolution_clock::time_point>> &lhs,
const std::pair<std::chrono::high_resolution_clock::time_point, std::vector<std::chrono::high_resolution_clock::time_point>> &rhs) {
return lhs.first < rhs.first;
});
std::vector<std::pair<std::chrono::high_resolution_clock::time_point, std::vector<double>>> times2;
for (auto &t : times) {
std::vector<double> tmp;
for (auto &t2 : t.second) {
tmp.push_back(std::chrono::duration<double>(t2 - t.first).count());
}
times2.push_back({t.first, tmp});
}
std::chrono::duration<double> dur = endTime - startTime;
std::chrono::duration<double> durArr = endTimeArrival - startTime;
if (!times.empty()) {
if (sconf.cache) {
auto s = client->getStart();
std::cout << "TABLE: Latency of Hot Storage" << std::endl;
std::cout << "Timestamp\tAvg Latency\tMin Latency\tMax Latency\tOps" << std::endl;
for (auto &t : times2) {
if (!t.second.empty()) {
double avg = 0.0;
std::for_each(t.second.begin(), t.second.end(), [&avg](double d) {
avg += d;
});
avg /= t.second.size();
std::cout << std::chrono::duration<double>(t.first - s).count() << "\t" << avg * 1e3 << "\t"
<< t.second[0] * 1e3 << "\t" << t.second[t.second.size() - 1] * 1e3 << "\t" <<
t.second.size() << std::endl;
}
}
//delete barrier;
std::cout << std::endl;
std::cout << "TABLE: Hot Storage Latencies" << std::endl;
std::cout << "Latency" << std::endl;
for (auto &t : times2) {
for (auto &t2 : t.second) {
std::cout << t2 * 1e3 << std::endl;
}
}
std::cout << std::endl;
}
client->stat();
std::cerr << "Arrival Rate (Mops) " << (sconf.batchSize * times.size()) / durArr.count() / 1e6 << std::endl;
std::cerr << "Throughput (Mops) " << ((double) ops + client->getHits()) / dur.count() / 1e6 << std::endl;
if (sconf.cache) {
std::cerr << "Hit Rate\tHits" << std::endl;
std::cerr << client->hitRate() << "\t" << client->getHits() << std::endl;
std::cerr << std::endl;
}
std::cout << "TABLE: Throughput" << std::endl;
std::cout << "Throughput" << std::endl;
std::cout << ((double) ops + client->getHits()) / dur.count() / 1e6 << std::endl;
#ifdef MODEL_CHANGE
std::cout << "TABLE: Model Change" << std::endl;
std::cout << "Latency\tStart" << std::endl;
std::cout << modelchange_time * 1e3 << "\t" << std::chrono::duration<double>(changeTime - client->getStart()).count() << std::endl;
#endif
}
delete client;
delete block;
dlclose(handler);
return 0;
}
void usage(char *command) {
using namespace std;
cout << command << " [-f <config file>]" << std::endl;
}
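/* [Editor's addition - minimal skeleton of the workload plugin interface that main()
 * resolves through dlopen/dlsym above. A real plugin (e.g. libzipfianWorkload.so) compiles
 * functions with exactly these extern "C" signatures into its own shared object; extern "C"
 * linkage is what lets dlsym() find the unmangled names. The placeholder bodies below are
 * assumptions of this sketch and generate no requests; main() never calls them directly. */
extern "C" void initWorkload() { /* seed generators, build the key distribution, ... */ }
extern "C" void initWorkloadFile(std::string filename) { (void) filename; }
extern "C" int getBatchesToRun() { return totalBatches; }
extern "C" BatchWrapper generateWorkloadBatch(unsigned int *seed, unsigned batchSize) {
    (void) seed;
    (void) batchSize;
    return BatchWrapper{}; // a real plugin pushes batchSize RequestWrapper entries here
}
extern "C" std::vector<BatchWrapper> getPopulationBatches(unsigned int *seed, unsigned batchSize) {
    (void) seed;
    (void) batchSize;
    return std::vector<BatchWrapper>{};
}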
| 4701e12e787937eb4d54e02d9ffbc5c726986cb5.cu | //
// Created by depaulsmiller on 1/15/21.
//
#include <unistd.h>
#include "helper.cuh"
#include <algorithm>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <dlfcn.h>
namespace pt = boost::property_tree;
using BatchWrapper = std::vector<RequestWrapper<unsigned long long, data_t *>>;
//#ifdef MODEL_CHANGE
using Model = kvgpu::AnalyticalModel<unsigned long long>;
//#else
//using Model = kvgpu::SimplModel<unsigned long long>;
//#endif
using RB = std::shared_ptr<Communication>;
int totalBatches = 10000;
int BATCHSIZE = 512;
int NUM_THREADS = 12;//std::thread::hardware_concurrency() - 10;
void usage(char *command);
struct ServerConf {
int threads;
int cpu_threads;
int gpus;
int streams;
std::string modelFile;
bool train;
int size;
int batchSize;
bool cache;
ServerConf() {
batchSize = BATCHSIZE;
modelFile = "";
cpu_threads = NUM_THREADS;
threads = 2;//1;//4;
gpus = 1;
streams = 10;//10;
size = 1000000;
train = false;
cache = true;
}
explicit ServerConf(const std::string &filename) {
pt::ptree root;
pt::read_json(filename, root);
cpu_threads = root.get<int>("cpu_threads", NUM_THREADS);
threads = root.get<int>("threads", 4);
streams = root.get<int>("streams", 2);
gpus = root.get<int>("gpus", 2);
modelFile = root.get<std::string>("modelFile", "");
train = root.get<bool>("train", false);
size = root.get<int>("size", 1000000);
batchSize = root.get<int>("batchSize", BATCHSIZE);
cache = root.get<bool>("cache", true);
}
void persist(const std::string &filename) const {
pt::ptree root;
root.put("threads", threads);
root.put("streams", streams);
root.put("gpus", gpus);
root.put("modelFile", modelFile);
root.put("train", train);
root.put("size", size);
root.put("batchSize", batchSize);
root.put("cache", cache);
pt::write_json(filename, root);
}
~ServerConf() = default;
};
int main(int argc, char **argv) {
ServerConf sconf;
bool workloadFilenameSet = false;
std::string workloadFilename;
#ifdef MODEL_CHANGE
std::string dllib = "./libzipfianWorkloadSwitch.so";
#else
std::string dllib = "./libzipfianWorkload.so";
#endif
    int c; // getopt() returns int; a plain char may be unsigned and never compare equal to -1
while ((c = getopt(argc, argv, "f:w:l:")) != -1) {
switch (c) {
case 'f':
sconf = ServerConf(std::string(optarg));
// optarg is the file
break;
case 'w':
workloadFilenameSet = true;
workloadFilename = optarg;
break;
case 'l':
dllib = optarg;
break;
default:
case '?':
usage(argv[0]);
return 1;
}
}
void (*initWorkload)() = nullptr;
void (*initWorkloadFile)(std::string) = nullptr;
BatchWrapper (*generateWorkloadBatch)(unsigned int *, unsigned) = nullptr;
int (*getBatchesToRun)() = nullptr;
std::vector<BatchWrapper> (*getPopulationBatches)(unsigned int *, unsigned) = nullptr;
auto handler = dlopen(dllib.c_str(), RTLD_LAZY);
if (!handler) {
std::cerr << dlerror() << std::endl;
return 1;
}
initWorkload = (void (*)()) dlsym(handler, "initWorkload");
initWorkloadFile = (void (*)(std::string)) dlsym(handler, "initWorkloadFile");
generateWorkloadBatch = (BatchWrapper(*)(unsigned *, unsigned)) dlsym(handler, "generateWorkloadBatch");
getBatchesToRun = (int (*)()) dlsym(handler, "getBatchesToRun");
getPopulationBatches = (std::vector<BatchWrapper> (*)(unsigned int *, unsigned)) dlsym(handler,
"getPopulationBatches");
#ifdef MODEL_CHANGE
auto workloadSwitch = (void (*)()) dlsym(handler, "changeWorkload");
#endif
if (workloadFilenameSet) {
initWorkloadFile(workloadFilename);
} else {
initWorkload();
}
totalBatches = getBatchesToRun();
std::vector<PartitionedSlabUnifiedConfig> conf;
for (int i = 0; i < sconf.gpus; i++) {
for (int j = 0; j < sconf.streams; j++) {
gpuErrchk(cudaSetDevice(i));
cudaStream_t stream = cudaStreamDefault;
if (j != 0) {
gpuErrchk(cudaStreamCreate(&stream));
}
conf.push_back({sconf.size, i, stream});
}
}
std::unique_ptr<KVStoreCtx<Model>> ctx = nullptr;
if (sconf.modelFile != "") {
ctx = std::make_unique<KVStoreCtx<Model>>(conf, sconf.cpu_threads, sconf.modelFile);
} else {
//#ifdef MODEL_CHANGE
unsigned tseed = time(nullptr);
std::vector<std::pair<unsigned long long, unsigned>> trainVec;
std::hash<unsigned long long> hfn{};
for (int i = 0; i < 10000; i++) {
BatchWrapper b = generateWorkloadBatch(&tseed, sconf.batchSize);
for (auto &elm : b) {
trainVec.push_back({elm.key, hfn(elm.key)});
}
}
Model m;
m.train(trainVec);
m.persist("./temp.json");
#ifdef MODEL_CHANGE
workloadSwitch();
#endif
ctx = std::make_unique<KVStoreCtx<Model>>(conf, sconf.cpu_threads, m);
//#else
// ctx = std::make_unique<KVStoreCtx<unsigned long long, data_t, Model>>(conf, sconf.cpu_threads);
//#endif
}
GeneralClient<Model> *client = nullptr;
if (sconf.cache) {
if (sconf.gpus == 0) {
client = new JustCacheKVStoreClient<Model>(*ctx);
} else {
client = new KVStoreClient<Model>(*ctx);
}
} else {
client = new NoCacheKVStoreClient<Model>(*ctx);
}
unsigned popSeed = time(nullptr);
auto pop = getPopulationBatches(&popSeed, BATCHSIZE);
for (auto &b : pop) {
bool retry;
int size = b.size();
do {
retry = false;
loadBalanceSet = true;
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
auto start = std::chrono::high_resolution_clock::now();
client->batch(b, rb, start);
int count = 0;
do {
Response response;
if (rb->try_recv(response)) {
count++;
if (response.retry) {
retry = true;
}
}
} while (count < size);
} while (retry);
}
std::cerr << "Populated" << std::endl;
client->resetStats();
std::vector<std::thread> threads;
auto *q = new tbb::concurrent_queue<std::pair<BatchWrapper, RB>>[sconf.threads];
std::atomic_bool reclaim{false};
std::atomic_bool changing{false};
auto *block = new block_t(sconf.threads);
for (int i = 0; i < sconf.threads; ++i) {
threads.push_back(std::thread([&client, &reclaim, &q, &changing, &block, sconf](int tid) {
init_loadbalance(sconf.cpu_threads);
std::shared_ptr<Communication> lastResBuf = nullptr;
while (!reclaim) {
std::pair<BatchWrapper, RB> p;
if (changing) {
block->wait();
while (changing) {
if (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
client->batch_drop_modifications(p.first, p.second, start);
}
}
}
if (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
lastResBuf = p.second;
client->batch(p.first, p.second, start);
}
}
std::pair<BatchWrapper, RB> p;
while (q[tid].try_pop(p)) {
auto start = std::chrono::high_resolution_clock::now();
lastResBuf = p.second;
client->batch(p.first, p.second, start);
}
int count = 0;
do {
Response response;
if (lastResBuf->try_recv(response)) {
count++;
}
} while (count < sconf.batchSize);
}, i));
}
auto startTime = std::chrono::high_resolution_clock::now();
std::vector<std::thread> threads2;
int clients = 8;
#ifdef MODEL_CHANGE
std::atomic_bool finishBatching;
finishBatching = false;
auto fn = [&finishBatching, clients, &q, &sconf, generateWorkloadBatch, &changing, &block, &client](int tid) {
unsigned tseed = time(nullptr);
int i = 0;
while (!finishBatching) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = Model(18000);
double time;
client->change_model(changing, tmp, block, time);
std::cerr << "Changed " << time * 1e3 << "\n";
}*/
auto rb = std::make_shared<ResultsBuffers<data_t>>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
i++;
}
};
#else
auto fn = [clients, &q, &sconf, generateWorkloadBatch, &changing, &block, &client](int tid) {
unsigned tseed = time(nullptr);
for (int i = 0; i < totalBatches / clients; i++) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = Model(18000);
double time;
client->change_model(changing, tmp, block, time);
std::cerr << "Changed " << time * 1e3 << "\n";
}*/
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
}
if (tid == 0) {
for (int i = 0; i < totalBatches - (totalBatches / clients) * clients; i++) {
/*if (tid == 0 && i == totalBatches / clients / 10) {
std::cerr << "Changing\n";
auto tmp = kvgpu::SimplModel<unsigned>(18000);
changing = true;
client->change_model(tmp, block);
changing = false;
std::cerr << "Changed\n";
}*/
auto rb = std::make_shared<LocalCommunication>(sconf.batchSize);
std::pair<BatchWrapper, RB> p = {
generateWorkloadBatch(&tseed, sconf.batchSize),
std::move(rb)};
q[(tid + i) % sconf.threads].push(std::move(p));
}
}
};
#endif
for (int j = 0; j < clients; j++) {
threads2.push_back(std::thread(fn, j));
}
#ifdef MODEL_CHANGE
std::cerr << "Sleep" << std::endl;
sleep(1);
//workloadSwitch();
//sleep(1);
std::vector<std::pair<unsigned long long, unsigned>> trainVec;
unsigned threadSeeed = time(nullptr);
for (int i = 0; i < 10000; i++) {
BatchWrapper b = generateWorkloadBatch(&threadSeeed, sconf.batchSize);
std::hash<unsigned long long> hfn{};
for (auto &elm : b) {
trainVec.push_back({elm.key, hfn(elm.key)});
}
}
Model m;
m.train(trainVec);
m.persist("temp2.json");
std::cerr << "Changing\n";
double modelchange_time;
auto changeTime = std::chrono::high_resolution_clock::now();
client->change_model(changing, m, block, modelchange_time);
std::cerr << "Changed " << modelchange_time * 1e3 << "\n";
sleep(5);
finishBatching = true;
#endif
for (auto &t : threads2) {
t.join();
}
auto endTimeArrival = std::chrono::high_resolution_clock::now();
reclaim = true;
std::cerr << "Awake and joining\n";
for (auto &t : threads) {
t.join();
}
auto times = client->getCacheTimes();
auto endTime = std::chrono::high_resolution_clock::now();
size_t ops = client->getOps();
std::sort(times.begin(), times.end(),
[](const std::pair<std::chrono::high_resolution_clock::time_point, std::vector<std::chrono::high_resolution_clock::time_point>> &lhs,
const std::pair<std::chrono::high_resolution_clock::time_point, std::vector<std::chrono::high_resolution_clock::time_point>> &rhs) {
return lhs.first < rhs.first;
});
std::vector<std::pair<std::chrono::high_resolution_clock::time_point, std::vector<double>>> times2;
for (auto &t : times) {
std::vector<double> tmp;
for (auto &t2 : t.second) {
tmp.push_back(std::chrono::duration<double>(t2 - t.first).count());
}
times2.push_back({t.first, tmp});
}
std::chrono::duration<double> dur = endTime - startTime;
std::chrono::duration<double> durArr = endTimeArrival - startTime;
if (!times.empty()) {
if (sconf.cache) {
auto s = client->getStart();
std::cout << "TABLE: Latency of Hot Storage" << std::endl;
std::cout << "Timestamp\tAvg Latency\tMin Latency\tMax Latency\tOps" << std::endl;
for (auto &t : times2) {
if (!t.second.empty()) {
double avg = 0.0;
std::for_each(t.second.begin(), t.second.end(), [&avg](double d) {
avg += d;
});
avg /= t.second.size();
std::cout << std::chrono::duration<double>(t.first - s).count() << "\t" << avg * 1e3 << "\t"
<< t.second[0] * 1e3 << "\t" << t.second[t.second.size() - 1] * 1e3 << "\t" <<
t.second.size() << std::endl;
}
}
//delete barrier;
std::cout << std::endl;
std::cout << "TABLE: Hot Storage Latencies" << std::endl;
std::cout << "Latency" << std::endl;
for (auto &t : times2) {
for (auto &t2 : t.second) {
std::cout << t2 * 1e3 << std::endl;
}
}
std::cout << std::endl;
}
client->stat();
std::cerr << "Arrival Rate (Mops) " << (sconf.batchSize * times.size()) / durArr.count() / 1e6 << std::endl;
std::cerr << "Throughput (Mops) " << ((double) ops + client->getHits()) / dur.count() / 1e6 << std::endl;
if (sconf.cache) {
std::cerr << "Hit Rate\tHits" << std::endl;
std::cerr << client->hitRate() << "\t" << client->getHits() << std::endl;
std::cerr << std::endl;
}
std::cout << "TABLE: Throughput" << std::endl;
std::cout << "Throughput" << std::endl;
std::cout << ((double) ops + client->getHits()) / dur.count() / 1e6 << std::endl;
#ifdef MODEL_CHANGE
std::cout << "TABLE: Model Change" << std::endl;
std::cout << "Latency\tStart" << std::endl;
std::cout << modelchange_time * 1e3 << "\t" << std::chrono::duration<double>(changeTime - client->getStart()).count() << std::endl;
#endif
}
delete client;
delete block;
dlclose(handler);
return 0;
}
void usage(char *command) {
using namespace std;
cout << command << " [-f <config file>]" << std::endl;
}
|
51f2da9f983f1631cf8ab75913555b340285d2e8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <vector>
#include <hip/hip_runtime_api.h>
#include <hipblas.h> // the hipblas* calls below are declared here, not in rocblas.h
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
#define N 500000
// Simple short kernels
__global__
void kernel_a(float* x, float* y){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) y[idx] += 1;
}
__global__
void kernel_c(float* x, float* y){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) y[idx] += 1;
}
int main(){
hipStream_t stream1;
hipStreamCreateWithFlags(&stream1, hipStreamNonBlocking);
hipblasHandle_t cublas_handle;
hipblasCreate(&cublas_handle);
hipblasSetStream(cublas_handle, stream1);
// Set up host data and initialize
float* h_x;
float* h_y;
h_x = (float*) malloc(N * sizeof(float));
h_y = (float*) malloc(N * sizeof(float));
for (int i = 0; i < N; ++i){
h_x[i] = float(i);
h_y[i] = float(i);
}
// Print out the first 25 values of h_y
for (int i = 0; i < 25; ++i){
printf("%2.0f ", h_y[i]);
}
printf("\n");
// Set up device data
float* d_x;
float* d_y;
float d_a = 5.0;
hipMalloc((void**) &d_x, N * sizeof(float));
hipMalloc((void**) &d_y, N * sizeof(float));
cudaCheckErrors("hipMalloc failed");
hipblasSetVector(N, sizeof(h_x[0]), h_x, 1, d_x, 1); // similar to cudaMemcpyHtoD
hipblasSetVector(N, sizeof(h_y[0]), h_y, 1, d_y, 1); // similar to cudaMemcpyHtoD
cudaCheckErrors("hipblasSetVector failed");
// Set up graph
hipGraph_t graph; // main graph
hipGraph_t libraryGraph; // sub graph for cuBLAS call
std::vector<hipGraphNode_t> nodeDependencies;
hipGraphNode_t kernelNode1, kernelNode2, libraryNode;
    hipKernelNodeParams kernelNode1Params {0};
    hipKernelNodeParams kernelNode2Params {0};
    hipGraphCreate(&graph, 0); // create the graph
cudaCheckErrors("cudaGraphCreate failure");
// kernel_a and kernel_c use same args
void *kernelArgs[2] = {(void *)&d_x, (void *)&d_y};
int threads = 512;
    int blocks = (N + threads - 1) / threads; // ceiling division: enough blocks to cover N elements
// Adding 1st node, kernel_a, as head node of graph
kernelNode1Params.func = (void *)kernel_a;
kernelNode1Params.gridDim = dim3(blocks, 1, 1);
kernelNode1Params.blockDim = dim3(threads, 1, 1);
kernelNode1Params.sharedMemBytes = 0;
kernelNode1Params.kernelParams = (void **)kernelArgs;
kernelNode1Params.extra = NULL;
    hipGraphAddKernelNode(&kernelNode1, graph, NULL,
                          0, &kernelNode1Params);
cudaCheckErrors("Adding kernelNode1 failed");
    nodeDependencies.push_back(kernelNode1); // manage dependency vector
// Adding 2nd node, libraryNode, with kernelNode1 as dependency
hipStreamBeginCapture(stream1, hipStreamCaptureModeGlobal);
cudaCheckErrors("Stream capture begin failure");
// Library call
hipblasSaxpy(cublas_handle, N, &d_a, d_x, 1, d_y, 1);
cudaCheckErrors("hipblasSaxpy failure");
hipStreamEndCapture(stream1, &libraryGraph);
cudaCheckErrors("Stream capture end failure");
    hipGraphAddChildGraphNode(&libraryNode, graph, nodeDependencies.data(),
                              nodeDependencies.size(), libraryGraph);
cudaCheckErrors("Adding libraryNode failed");
nodeDependencies.clear();
nodeDependencies.push_back(libraryNode); // manage dependency vector
// Adding 3rd node, kernel_c, with libraryNode as dependency
kernelNode2Params.func = (void *)kernel_c;
kernelNode2Params.gridDim = dim3(blocks, 1, 1);
kernelNode2Params.blockDim = dim3(threads, 1, 1);
kernelNode2Params.sharedMemBytes = 0;
kernelNode2Params.kernelParams = (void **)kernelArgs;
kernelNode2Params.extra = NULL;
    hipGraphAddKernelNode(&kernelNode2, graph, nodeDependencies.data(),
                          nodeDependencies.size(), &kernelNode2Params);
cudaCheckErrors("Adding kernelNode2 failed");
nodeDependencies.clear();
nodeDependencies.push_back(kernelNode2); // manage dependency vector
hipGraphNode_t *nodes = NULL;
size_t numNodes = 0;
hipGraphGetNodes(graph, nodes, &numNodes);
cudaCheckErrors("Graph get nodes failed");
printf("Number of the nodes in the graph = %zu\n", numNodes);
hipGraphExec_t instance;
hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
cudaCheckErrors("Graph instantiation failed");
// Launch the graph instance 100 times
for (int i = 0; i < 100; ++i){
hipGraphLaunch(instance, stream1);
hipStreamSynchronize(stream1);
}
cudaCheckErrors("Graph launch failed");
hipDeviceSynchronize();
// Copy memory back to host
    hipMemcpy(h_y, d_y, N * sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrors("Finishing memcpy failed");
hipDeviceSynchronize();
// Print out the first 25 values of h_y
for (int i = 0; i < 25; ++i){
printf("%2.0f ", h_y[i]);
}
printf("\n");
return 0;
}
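/* [Editor's addition - alternative construction sketch, not part of the original example.]
 * The same three-step sequence (kernel_a -> hipblasSaxpy -> kernel_c) can also be built
 * entirely with stream capture instead of explicit graph-node calls, which is often simpler
 * once library calls are involved. kernel_a, kernel_c and N are reused from this file; the
 * function name and parameter list are assumptions of this sketch. */
static hipGraphExec_t buildCapturedGraph(hipStream_t stream, hipblasHandle_t handle,
                                         float *d_x, float *d_y, const float *alpha) {
    int threads = 512;
    int blocks = (N + threads - 1) / threads;
    hipblasSetStream(handle, stream); // the library work must be issued on the capturing stream
    hipGraph_t capturedGraph;
    hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
    hipLaunchKernelGGL(kernel_a, dim3(blocks), dim3(threads), 0, stream, d_x, d_y);
    hipblasSaxpy(handle, N, alpha, d_x, 1, d_y, 1);
    hipLaunchKernelGGL(kernel_c, dim3(blocks), dim3(threads), 0, stream, d_x, d_y);
    hipStreamEndCapture(stream, &capturedGraph);
    hipGraphExec_t instance;
    hipGraphInstantiate(&instance, capturedGraph, NULL, NULL, 0);
    hipGraphDestroy(capturedGraph); // the executable graph keeps its own copy of the topology
    return instance;
}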
| 51f2da9f983f1631cf8ab75913555b340285d2e8.cu | #include <stdio.h>
#include <vector>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
#define N 500000
// Simple short kernels
__global__
void kernel_a(float* x, float* y){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) y[idx] += 1;
}
__global__
void kernel_c(float* x, float* y){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) y[idx] += 1;
}
int main(){
cudaStream_t stream1;
cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
cublasHandle_t cublas_handle;
cublasCreate(&cublas_handle);
cublasSetStream(cublas_handle, stream1);
// Set up host data and initialize
float* h_x;
float* h_y;
h_x = (float*) malloc(N * sizeof(float));
h_y = (float*) malloc(N * sizeof(float));
for (int i = 0; i < N; ++i){
h_x[i] = float(i);
h_y[i] = float(i);
}
// Print out the first 25 values of h_y
for (int i = 0; i < 25; ++i){
printf("%2.0f ", h_y[i]);
}
printf("\n");
// Set up device data
float* d_x;
float* d_y;
float d_a = 5.0;
cudaMalloc((void**) &d_x, N * sizeof(float));
cudaMalloc((void**) &d_y, N * sizeof(float));
cudaCheckErrors("cudaMalloc failed");
cublasSetVector(N, sizeof(h_x[0]), h_x, 1, d_x, 1); // similar to cudaMemcpyHtoD
cublasSetVector(N, sizeof(h_y[0]), h_y, 1, d_y, 1); // similar to cudaMemcpyHtoD
cudaCheckErrors("cublasSetVector failed");
// Set up graph
cudaGraph_t graph; // main graph
cudaGraph_t libraryGraph; // sub graph for cuBLAS call
std::vector<cudaGraphNode_t> nodeDependencies;
cudaGraphNode_t kernelNode1, kernelNode2, libraryNode;
cudaKernelNodeParams kernelNode1Params {0};
cudaKernelNodeParams kernelNode2Params {0};
cudaGraphCreate(&graph, 0); // create the graph
cudaCheckErrors("cudaGraphCreate failure");
// kernel_a and kernel_c use same args
void *kernelArgs[2] = {(void *)&d_x, (void *)&d_y};
int threads = 512;
    int blocks = (N + threads - 1) / threads; // ceiling division: enough blocks to cover N elements
// Adding 1st node, kernel_a, as head node of graph
kernelNode1Params.func = (void *)kernel_a;
kernelNode1Params.gridDim = dim3(blocks, 1, 1);
kernelNode1Params.blockDim = dim3(threads, 1, 1);
kernelNode1Params.sharedMemBytes = 0;
kernelNode1Params.kernelParams = (void **)kernelArgs;
kernelNode1Params.extra = NULL;
cudaGraphAddKernelNode(&kernelNode1, graph, NULL,
0, &kernelNode1Params);
cudaCheckErrors("Adding kernelNode1 failed");
    nodeDependencies.push_back(kernelNode1); // manage dependency vector
// Adding 2nd node, libraryNode, with kernelNode1 as dependency
cudaStreamBeginCapture(stream1, cudaStreamCaptureModeGlobal);
cudaCheckErrors("Stream capture begin failure");
// Library call
cublasSaxpy(cublas_handle, N, &d_a, d_x, 1, d_y, 1);
cudaCheckErrors("cublasSaxpy failure");
cudaStreamEndCapture(stream1, &libraryGraph);
cudaCheckErrors("Stream capture end failure");
cudaGraphAddChildGraphNode(&libraryNode, graph, nodeDependencies.data(),
nodeDependencies.size(), libraryGraph);
cudaCheckErrors("Adding libraryNode failed");
nodeDependencies.clear();
nodeDependencies.push_back(libraryNode); // manage dependency vector
// Adding 3rd node, kernel_c, with libraryNode as dependency
kernelNode2Params.func = (void *)kernel_c;
kernelNode2Params.gridDim = dim3(blocks, 1, 1);
kernelNode2Params.blockDim = dim3(threads, 1, 1);
kernelNode2Params.sharedMemBytes = 0;
kernelNode2Params.kernelParams = (void **)kernelArgs;
kernelNode2Params.extra = NULL;
cudaGraphAddKernelNode(&kernelNode2, graph, nodeDependencies.data(),
nodeDependencies.size(), &kernelNode2Params);
cudaCheckErrors("Adding kernelNode2 failed");
nodeDependencies.clear();
nodeDependencies.push_back(kernelNode2); // manage dependency vector
cudaGraphNode_t *nodes = NULL;
size_t numNodes = 0;
cudaGraphGetNodes(graph, nodes, &numNodes);
cudaCheckErrors("Graph get nodes failed");
printf("Number of the nodes in the graph = %zu\n", numNodes);
cudaGraphExec_t instance;
cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
cudaCheckErrors("Graph instantiation failed");
// Launch the graph instance 100 times
for (int i = 0; i < 100; ++i){
cudaGraphLaunch(instance, stream1);
cudaStreamSynchronize(stream1);
}
cudaCheckErrors("Graph launch failed");
cudaDeviceSynchronize();
// Copy memory back to host
cudaMemcpy(h_y, d_y, N, cudaMemcpyDeviceToHost);
cudaCheckErrors("Finishing memcpy failed");
cudaDeviceSynchronize();
// Print out the first 25 values of h_y
for (int i = 0; i < 25; ++i){
printf("%2.0f ", h_y[i]);
}
printf("\n");
return 0;
}
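/* [Editor's addition - illustrative check, not part of the original example.]
 * With x[i] = i and y[i] = i initially, every launch of the three-node graph adds
 * 1 (kernel_a) + a*x[i] (the saxpy) + 1 (kernel_c), so after `iterations` launches
 * y[i] should equal i + iterations*(2 + a*i). The helper below is an assumption of
 * this sketch (main() does not call it) and assumes the full array was copied back. */
static int verify_result(const float *h_y, int n, int iterations, float a) {
    int mismatches = 0;
    for (int i = 0; i < n; ++i) {
        float expected = (float) i + (float) iterations * (2.0f + a * (float) i);
        float diff = h_y[i] - expected;
        if (diff < 0.0f) diff = -diff;
        float tol = 1e-3f * (expected > 1.0f ? expected : 1.0f); // loose relative tolerance for float accumulation
        if (diff > tol) {
            if (mismatches < 10)
                printf("mismatch at %d: got %f expected %f\n", i, h_y[i], expected);
            ++mismatches;
        }
    }
    return mismatches;
}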
|
53d80caf372a1292744af3af18801a0dd6f25352.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* sobfu includes */
#include <sobfu/cuda/utils.hpp>
#include <sobfu/reductor.hpp>
#define FULL_MASK 0xffffffff
/*
* OWN KERNELS
*/
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_data_kernel(float2 *g_idata_global, float2 *g_idata_n, float *g_odata,
unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += (g_idata_global[i].x - g_idata_n[i].x) * (g_idata_global[i].x - g_idata_n[i].x);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += (g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x) *
(g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_reg_sobolev_kernel(Mat4f *g_idata, float *g_odata, unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += norm_sq(g_idata[i].data[0]) + norm_sq(g_idata[i].data[1]) + norm_sq(g_idata[i].data[2]);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += norm_sq(g_idata[i + blockSize].data[0]) + norm_sq(g_idata[i + blockSize].data[1]) +
norm_sq(g_idata[i + blockSize].data[2]);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_voxel_max_energy_kernel(float2 *d_idata_global, float2 *d_idata_n,
Mat4f *d_idata_reg, float2 *d_o_data, float w_reg,
unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
float temp = (d_idata_global[i].x - d_idata_n[i].x) * (d_idata_global[i].x - d_idata_n[i].x) +
w_reg * (norm_sq(d_idata_reg[i].data[0]) + norm_sq(d_idata_reg[i].data[1]) +
norm_sq(d_idata_reg[i].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
temp = (d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) *
(d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) +
w_reg * (norm_sq(d_idata_reg[i + blockSize].data[0]) + norm_sq(d_idata_reg[i + blockSize].data[1]) +
norm_sq(d_idata_reg[i + blockSize].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
d_o_data[blockIdx.x] = local_max;
}
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_max_kernel(float4 *updates, float2 *g_o_max_data, unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
if (norm(updates[i]) > local_max.x) {
local_max.x = norm(updates[i]);
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
if (norm(updates[i + blockSize]) > local_max.x) {
local_max.x = norm(updates[i + blockSize]);
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
g_o_max_data[blockIdx.x] = local_max;
}
}
/* wrapper function for kernel launch */
void sobfu::device::reduce_data(int size, int threads, int blocks, float2 *d_idata_global, float2 *d_idata_n,
float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_data_kernel<512, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_data_kernel<256, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_data_kernel<128, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_data_kernel<64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_data_kernel<32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_data_kernel<16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_data_kernel<8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_data_kernel<4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_data_kernel<2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_data_kernel<1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_data_kernel<512, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_data_kernel<256, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_data_kernel<128, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_data_kernel<64, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_data_kernel<32, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_data_kernel<16, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_data_kernel<8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_data_kernel<4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_data_kernel<2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_data_kernel<1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_odata, size);
break;
}
}
}
void sobfu::device::reduce_reg_sobolev(int size, int threads, int blocks, Mat4f *d_idata, float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_reg_sobolev_kernel<1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
}
}
}
void sobfu::device::reduce_voxel_max_energy(int size, int threads, int blocks, float2 *d_idata_global,
float2 *d_idata_n, Mat4f *d_idata_reg, float w_reg, float2 *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
/* when there is only one warp per block, we need to allocate two warps worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<512, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<256, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<128, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<64, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<32, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<16, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<8, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<4, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<2, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<1, true>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
} else {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<512, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<256, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<128, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<64, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<32, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<16, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<8, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<4, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<2, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_voxel_max_energy_kernel<1, false>)
, dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
}
}
void sobfu::device::reduce_max(int size, int threads, int blocks, float4 *updates, float2 *d_o_max_data) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
    /* when there is only one warp per block, we need to allocate two warps' worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_max_kernel<512, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_max_kernel<256, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_max_kernel<128, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_max_kernel<64, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_max_kernel<32, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_max_kernel<16, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_max_kernel<8, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_max_kernel<4, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_max_kernel<2, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_max_kernel<1, true>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
}
} else {
switch (threads) {
case 512:
hipLaunchKernelGGL(( reduce_max_kernel<512, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 256:
hipLaunchKernelGGL(( reduce_max_kernel<256, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 128:
hipLaunchKernelGGL(( reduce_max_kernel<128, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 64:
hipLaunchKernelGGL(( reduce_max_kernel<64, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 32:
hipLaunchKernelGGL(( reduce_max_kernel<32, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 16:
hipLaunchKernelGGL(( reduce_max_kernel<16, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 8:
hipLaunchKernelGGL(( reduce_max_kernel<8, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 4:
hipLaunchKernelGGL(( reduce_max_kernel<4, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 2:
hipLaunchKernelGGL(( reduce_max_kernel<2, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
case 1:
hipLaunchKernelGGL(( reduce_max_kernel<1, false>), dim3(dimGrid), dim3(dimBlock), smemSize, 0, updates, d_o_max_data, size);
break;
}
}
}
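/* --- illustrative sketch, not part of the original sobfu sources ---
 * the wrapper above only runs the first, block-level pass: each block leaves one
 * (max value, voxel index) pair in d_o_max_data. a minimal host-side finish is
 * sketched below. the helper name is made up for illustration, and hipMemcpy is
 * assumed to be available through the HIP runtime header included at the top of
 * this file. */
static inline float2 finish_reduce_max_on_host_example(float2 *d_o_max_data, int blocks) {
    float2 *h_partials = new float2[blocks];
    /* copy the per-block partial maxima back to the host */
    hipMemcpy(h_partials, d_o_max_data, blocks * sizeof(float2), hipMemcpyDeviceToHost);
    float2 best = h_partials[0];
    for (int b = 1; b < blocks; ++b) {
        if (h_partials[b].x > best.x) { /* .x holds the energy, .y the voxel index */
            best = h_partials[b];
        }
    }
    delete[] h_partials;
    return best;
}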
| 53d80caf372a1292744af3af18801a0dd6f25352.cu | /* sobfu includes */
#include <sobfu/cuda/utils.hpp>
#include <sobfu/reductor.hpp>
#define FULL_MASK 0xffffffff
/*
* OWN KERNELS
*/
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_data_kernel(float2 *g_idata_global, float2 *g_idata_n, float *g_odata,
unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += (g_idata_global[i].x - g_idata_n[i].x) * (g_idata_global[i].x - g_idata_n[i].x);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += (g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x) *
(g_idata_global[i + blockSize].x - g_idata_n[i + blockSize].x);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_reg_sobolev_kernel(Mat4f *g_idata, float *g_odata, unsigned int n) {
float *sdata = SharedMemory<float>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float mySum = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
mySum += norm_sq(g_idata[i].data[0]) + norm_sq(g_idata[i].data[1]) + norm_sq(g_idata[i].data[2]);
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
mySum += norm_sq(g_idata[i + blockSize].data[0]) + norm_sq(g_idata[i + blockSize].data[1]) +
norm_sq(g_idata[i + blockSize].data[2]);
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = mySum;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if (tid < 32) {
/* fetch final intermediate sum from 2nd warp */
if (blockSize >= 64)
mySum += sdata[tid + 32];
/* reduce final warp using shuffle */
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(FULL_MASK, mySum, offset);
}
}
#else
/* fully unroll reduction within a single warp */
if ((blockSize >= 64) && (tid < 32)) {
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
/* write result for this block to global mem */
if (tid == 0)
g_odata[blockIdx.x] = mySum;
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_voxel_max_energy_kernel(float2 *d_idata_global, float2 *d_idata_n,
Mat4f *d_idata_reg, float2 *d_o_data, float w_reg,
unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
float temp = (d_idata_global[i].x - d_idata_n[i].x) * (d_idata_global[i].x - d_idata_n[i].x) +
w_reg * (norm_sq(d_idata_reg[i].data[0]) + norm_sq(d_idata_reg[i].data[1]) +
norm_sq(d_idata_reg[i].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
temp = (d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) *
(d_idata_global[i + blockSize].x - d_idata_n[i + blockSize].x) +
w_reg * (norm_sq(d_idata_reg[i + blockSize].data[0]) + norm_sq(d_idata_reg[i + blockSize].data[1]) +
norm_sq(d_idata_reg[i + blockSize].data[2]));
if (temp > local_max.x) {
local_max.x = temp;
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
d_o_data[blockIdx.x] = local_max;
}
}
template <unsigned int blockSize, bool nIsPow2>
__global__ void sobfu::device::reduce_max_kernel(float4 *updates, float2 *g_o_max_data, unsigned int n) {
float2 *sdata = SharedMemory<float2>();
/* perform first level of reduction, reading from global memory, writing to shared memory */
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockSize * 2 + threadIdx.x;
unsigned int gridSize = blockSize * 2 * gridDim.x;
float2 local_max;
local_max.x = 0.f;
local_max.y = 0.f;
    /* we reduce multiple elements per thread; the number is determined by the number of active thread blocks (via
* gridDim); more blocks will result in a larger gridSize and therefore fewer elements per thread */
while (i < n) {
if (norm(updates[i]) > local_max.x) {
local_max.x = norm(updates[i]);
local_max.y = (float) i;
}
/* ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays */
if (nIsPow2 || i + blockSize < n) {
if (norm(updates[i + blockSize]) > local_max.x) {
local_max.x = norm(updates[i + blockSize]);
local_max.y = (float) i + blockSize;
}
}
i += gridSize;
}
/* each thread puts its local sum into shared memory */
sdata[tid] = local_max;
__syncthreads();
/* do reduction in shared mem */
if ((blockSize >= 512) && (tid < 256)) {
if (sdata[tid + 256].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 256];
}
}
__syncthreads();
if ((blockSize >= 256) && (tid < 128)) {
if (sdata[tid + 128].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 128];
}
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64)) {
if (sdata[tid + 64].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 64];
}
}
__syncthreads();
if ((blockSize >= 64) && (tid < 32)) {
if (sdata[tid + 32].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 32];
}
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16)) {
if (sdata[tid + 16].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 16];
}
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8)) {
if (sdata[tid + 8].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 8];
}
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4)) {
if (sdata[tid + 4].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 4];
}
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2)) {
if (sdata[tid + 2].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 2];
}
}
__syncthreads();
if ((blockSize >= 2) && (tid < 1)) {
if (sdata[tid + 1].x > local_max.x) {
sdata[tid] = local_max = sdata[tid + 1];
}
}
__syncthreads();
/* write result for this block to global mem */
if (tid == 0) {
g_o_max_data[blockIdx.x] = local_max;
}
}
/* wrapper function for kernel launch */
void sobfu::device::reduce_data(int size, int threads, int blocks, float2 *d_idata_global, float2 *d_idata_n,
float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
    /* when there is only one warp per block, we need to allocate two warps' worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_data_kernel<512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
reduce_data_kernel<256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
reduce_data_kernel<128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
reduce_data_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
reduce_data_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
reduce_data_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
reduce_data_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
reduce_data_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
reduce_data_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
reduce_data_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_data_kernel<512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 256:
reduce_data_kernel<256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 128:
reduce_data_kernel<128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 64:
reduce_data_kernel<64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 32:
reduce_data_kernel<32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 16:
reduce_data_kernel<16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 8:
reduce_data_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 4:
reduce_data_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 2:
reduce_data_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
case 1:
reduce_data_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_odata, size);
break;
}
}
}
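/* --- illustrative sketch, not part of the original sobfu sources ---
 * reduce_data above leaves one partial sum per block in d_odata; the total data
 * energy is the sum of those partials. a minimal host-side finish is sketched
 * below. the helper name is made up for illustration, and cudaMemcpy is assumed
 * to be reachable through the headers included at the top of this file. */
static inline float finish_reduce_data_on_host_example(float *d_odata, int blocks) {
    float *h_partials = new float[blocks];
    cudaMemcpy(h_partials, d_odata, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.f;
    for (int b = 0; b < blocks; ++b) {
        total += h_partials[b]; /* accumulate the per-block partial sums */
    }
    delete[] h_partials;
    return total;
}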
void sobfu::device::reduce_reg_sobolev(int size, int threads, int blocks, Mat4f *d_idata, float *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
    /* when there is only one warp per block, we need to allocate two warps' worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_reg_sobolev_kernel<512, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce_reg_sobolev_kernel<256, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce_reg_sobolev_kernel<128, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce_reg_sobolev_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce_reg_sobolev_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce_reg_sobolev_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce_reg_sobolev_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce_reg_sobolev_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce_reg_sobolev_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce_reg_sobolev_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_reg_sobolev_kernel<512, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 256:
reduce_reg_sobolev_kernel<256, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 128:
reduce_reg_sobolev_kernel<128, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 64:
reduce_reg_sobolev_kernel<64, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 32:
reduce_reg_sobolev_kernel<32, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 16:
reduce_reg_sobolev_kernel<16, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 8:
reduce_reg_sobolev_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 4:
reduce_reg_sobolev_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 2:
reduce_reg_sobolev_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case 1:
reduce_reg_sobolev_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
}
}
}
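/* --- illustrative sketch, not part of the original sobfu sources ---
 * all wrappers in this file expect `threads` to be a power of two (<= 512) and
 * `blocks` to cover `size` at two elements per thread in the first pass. one
 * common way to derive those launch parameters, in the spirit of the CUDA
 * reduction sample, is sketched below; the helper names are made up for
 * illustration only. */
static inline unsigned int next_pow2_example(unsigned int x) {
    --x;
    x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16;
    return ++x;
}
static inline void reduction_launch_params_example(int size, int max_threads, int max_blocks,
                                                   int &threads, int &blocks) {
    /* half as many threads as elements, rounded up to the next power of two */
    threads = (size < max_threads * 2) ? (int) next_pow2_example((size + 1) / 2) : max_threads;
    /* each thread consumes two elements in the first pass */
    blocks = (size + (threads * 2 - 1)) / (threads * 2);
    if (blocks > max_blocks) blocks = max_blocks;
}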
void sobfu::device::reduce_voxel_max_energy(int size, int threads, int blocks, float2 *d_idata_global,
float2 *d_idata_n, Mat4f *d_idata_reg, float w_reg, float2 *d_odata) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
    /* when there is only one warp per block, we need to allocate two warps' worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_voxel_max_energy_kernel<512, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
reduce_voxel_max_energy_kernel<256, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
reduce_voxel_max_energy_kernel<128, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
reduce_voxel_max_energy_kernel<64, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
reduce_voxel_max_energy_kernel<32, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
reduce_voxel_max_energy_kernel<16, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
reduce_voxel_max_energy_kernel<8, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
reduce_voxel_max_energy_kernel<4, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
reduce_voxel_max_energy_kernel<2, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
reduce_voxel_max_energy_kernel<1, true>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_voxel_max_energy_kernel<512, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 256:
reduce_voxel_max_energy_kernel<256, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 128:
reduce_voxel_max_energy_kernel<128, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 64:
reduce_voxel_max_energy_kernel<64, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 32:
reduce_voxel_max_energy_kernel<32, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 16:
reduce_voxel_max_energy_kernel<16, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 8:
reduce_voxel_max_energy_kernel<8, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 4:
reduce_voxel_max_energy_kernel<4, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 2:
reduce_voxel_max_energy_kernel<2, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
case 1:
reduce_voxel_max_energy_kernel<1, false>
<<<dimGrid, dimBlock, smemSize>>>(d_idata_global, d_idata_n, d_idata_reg, d_odata, w_reg, size);
break;
}
}
}
void sobfu::device::reduce_max(int size, int threads, int blocks, float4 *updates, float2 *d_o_max_data) {
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
    /* when there is only one warp per block, we need to allocate two warps' worth of shared memory so that we don't
* index shared memory out of bounds */
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float2) : threads * sizeof(float2);
if (isPow2(size)) {
switch (threads) {
case 512:
reduce_max_kernel<512, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 256:
reduce_max_kernel<256, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 128:
reduce_max_kernel<128, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 64:
reduce_max_kernel<64, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 32:
reduce_max_kernel<32, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 16:
reduce_max_kernel<16, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 8:
reduce_max_kernel<8, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 4:
reduce_max_kernel<4, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 2:
reduce_max_kernel<2, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 1:
reduce_max_kernel<1, true><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
}
} else {
switch (threads) {
case 512:
reduce_max_kernel<512, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 256:
reduce_max_kernel<256, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 128:
reduce_max_kernel<128, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 64:
reduce_max_kernel<64, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 32:
reduce_max_kernel<32, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 16:
reduce_max_kernel<16, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 8:
reduce_max_kernel<8, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 4:
reduce_max_kernel<4, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 2:
reduce_max_kernel<2, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
case 1:
reduce_max_kernel<1, false><<<dimGrid, dimBlock, smemSize>>>(updates, d_o_max_data, size);
break;
}
}
}
|
0025b9d4ceee8813948ef898405b923831faddfa.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
static const unsigned int c1 = 0xcc9e2d51;
static const unsigned int c2 = 0x1b873593;
static const unsigned int r1 = 15;
static const unsigned int r2 = 13;
static const unsigned int m = 5;
static const unsigned int n = 0xe6546b64;
__device__ inline unsigned int h1(unsigned int k, unsigned int hash) {
k *= c1;
k = (k << r1) | (k >> (32-r1));
k *= c2;
hash ^= k;
hash = ((hash << r2) | (hash >> (32-r2)) * m) + n;
return hash;
}
__device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed)
{
unsigned int hash = seed;
hash = h1(v1, hash);
hash = h1(v2, hash);
hash = h1(v3, hash);
hash ^= (hash >> 16);
hash *= 0x85ebca6b;
hash ^= (hash >> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >> 16);
return (hash % mod);
}
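/* --- illustrative sketch, not part of the original BIDMach kernels ---
 * mmhash above is what __treePack uses to pseudo-randomly pick, for tree `itree`,
 * node `inode` and sample slot `jfeat`, a feature row in [0, nrows). the tiny
 * kernel below exercises that mapping in isolation; the kernel name and launch
 * shape are assumptions made for illustration only. */
__global__ void __exampleFeatureHash(int itree, int inode, int nrows, unsigned int seed,
                                     int *ifeats, int nsamps) {
  int jfeat = threadIdx.x + blockDim.x * blockIdx.x;
  if (jfeat < nsamps) {
    /* same (tree, node, sample) -> feature mapping as used inside __treePack */
    ifeats[jfeat] = mmhash(itree, inode, jfeat, nrows, seed);
  }
}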
#define DBSIZE (8*1024)
__global__ void __treePack(int *idata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens,
int nrows, int ncols, int ntrees, int nsamps) {
__shared__ int dbuff[DBSIZE];
__shared__ int fl[32];
int j, k, ic, ival;
int seed = 45123421;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
fl[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = fl[5];
int ishift = fl[4] + vshift;
int jshift = fl[3] + ishift;
int nshift = fl[2] + jshift;
int tshift = fl[1] + nshift;
int cmask = (1 << fl[5]) - 1;
int vmask = (1 << fl[4]) - 1;
int imask = (1 << fl[3]) - 1;
int jmask = (1 << fl[2]) - 1;
int nmask = (1 << fl[1]) - 1;
int tmask = (1 << fl[0]) - 1;
int nc = (DBSIZE / nrows);
int itree = threadIdx.y;
int jfeat = threadIdx.x;
for (int i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) {
int ctodo = min(nc, ncols - i);
for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) {
dbuff[j] = idata[j + i * nrows];
}
__syncthreads();
for (j = i; j < i + ctodo; j++) {
for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) {
int inode = treenodes[itree + j * ntrees];
int ifeat = mmhash(itree, inode, jfeat, nrows, seed);
long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) |
(((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) ;
for (k = jc[j]; k < jc[j+1]; k++) {
ic = icats[k];
if (jfeat < nsamps) {
ival = dbuff[ifeat + (j - i) * nrows];
out[jfeat + nsamps * (itree + ntrees * k)] = hdr | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask));
}
}
}
}
__syncthreads();
}
}
int treePack(int *fdata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) {
int ntx = 32 * (1 + (nsamps - 1)/32);
int nty = min(1024 / ntx, ntrees);
dim3 bdim(ntx, nty, 1);
int nb = min(32, 1 + (ncols-1)/32);
hipLaunchKernelGGL(( __treePack), dim3(nb),dim3(bdim), 0, 0, fdata, treenodes, icats, jc, out, fieldlens, nrows, ncols, ntrees, nsamps);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
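/* --- illustrative sketch, not part of the original BIDMach kernels ---
 * __treePack encodes (itree, inode, jfeat, ifeat, ival, icat) into one 64-bit key
 * using the per-field bit widths in fieldlens[0..5], with itree in the highest
 * field and icat in the lowest. the host helper below undoes that packing for a
 * single key; the struct and function names are made up for illustration. */
struct UnpackedTreeKey {
  int itree, inode, jfeat, ifeat, ival, icat;
};
static inline UnpackedTreeKey unpackTreeKey(long long key, const int *fieldlens) {
  int vshift = fieldlens[5];
  int ishift = fieldlens[4] + vshift;
  int jshift = fieldlens[3] + ishift;
  int nshift = fieldlens[2] + jshift;
  int tshift = fieldlens[1] + nshift;
  UnpackedTreeKey u;
  u.icat  = (int)(key & ((1LL << fieldlens[5]) - 1));
  u.ival  = (int)((key >> vshift) & ((1LL << fieldlens[4]) - 1));
  u.ifeat = (int)((key >> ishift) & ((1LL << fieldlens[3]) - 1));
  u.jfeat = (int)((key >> jshift) & ((1LL << fieldlens[2]) - 1));
  u.inode = (int)((key >> nshift) & ((1LL << fieldlens[1]) - 1));
  u.itree = (int)((key >> tshift) & ((1LL << fieldlens[0]) - 1));
  return u;
}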
class entImpty {
public:
static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); }
static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; }
};
class giniImpty {
public:
static __device__ inline float fupdate(int v) { return (float)v * (float)v; }
static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); }
};
#if __CUDA_ARCH__ >= 300
__device__ inline void accumup2(int &cnt, float &update) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(update, h);
int tmp = __shfl_up(cnt, h);
if (threadIdx.x >=h) {
update += tmpx;
cnt += tmp;
}
}
}
__device__ inline void accumup3(int &cnt, float &update, float &updatet) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(update, h);
float tmpy = __shfl_up(updatet, h);
int tmp = __shfl_up(cnt, h);
if (threadIdx.x >=h) {
update += tmpx;
updatet += tmpy;
cnt += tmp;
}
}
}
__device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_down(update, h);
float tmpy = __shfl_down(updatet, h);
int tmp = __shfl_down(cnt, h);
if (threadIdx.x + h <= bound) {
update += tmpx;
updatet += tmpy;
cnt += tmp;
}
}
}
__device__ inline void minup2(float &impty, int &ival) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(impty, h);
int tmp = __shfl_up(ival, h);
if (threadIdx.x >= h && tmpx < impty) {
impty = tmpx;
ival = tmp;
}
}
}
__device__ inline void maxup2(int &v, int &indx) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
int tmpv = __shfl_up(v, h);
int tmpi = __shfl_up(indx, h);
if (threadIdx.x >= h && tmpv > v) {
v = tmpv;
indx = tmpi;
}
}
}
template<typename T>
__global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {
__shared__ int catcnt[DBSIZE/2];
__shared__ int cattot[DBSIZE/2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
catcnt[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = catcnt[5];
int ishift = catcnt[4] + vshift;
int cmask = (1 << catcnt[5]) - 1;
int vmask = (1 << catcnt[4]) - 1;
int imask = (1 << catcnt[3]) - 1;
__syncthreads();
int i, j, k, jc0, jc1, jlast;
long long key;
int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt;
float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx;
for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) {
// Process a group with fixed itree, inode, and ifeat
jc0 = jc[i]; // The range of indices for this group
jc1 = jc[i+1];
__syncthreads();
// Clear the cat counts for this group
for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) {
catcnt[j] = 0;
cattot[j] = 0;
}
__syncthreads();
// First pass gets counts for each category and the (ci)log(ci) sum for this block
ctot = 0;
cacc = 0.0f;
maxcnt = -1;
imaxcnt = -1;
for (j = jc0; j < jc1; j += blockDim.x) {
if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts
key = keys[j + threadIdx.x]; // Each (x) thread handles a different input
cnt = counts[j + threadIdx.x];
icat = ((int)key) & cmask; // Extract the cat id and integer value
}
jlast = min(31, jc1 - j - 1);
for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = cattot[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
cnew = cold + cnt;
cattot[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold);
accumup2(cnt,update);
ctot += cnt; // Now update the total c and total ci log ci sums
cacc += update;
ctot = __shfl(ctot, jlast);
cacc = __shfl(cacc, jlast);
if (cnew > maxcnt) { // Compute and distribute the max cnt
maxcnt = cnew;
imaxcnt = icat;
}
maxup2(maxcnt, imaxcnt);
maxcnt = __shfl(maxcnt, jlast);
imaxcnt = __shfl(imaxcnt, jlast);
}
__syncthreads();
// if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc);
// Second pass to compute impurity at every input point
caccall = cacc; // Save the total count and (ci)log(ci) sum
cact = cacc;
ctotall = ctot;
ctot = 0;
cacc = 0.0f;
lastival = -1;
lastimpty = 1e7f;
minimpty = 1e7f;
for (j = jc0; j < jc1; j += blockDim.x) {
if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts
key = keys[j + threadIdx.x]; // Each (x) thread handles a different input
cnt = counts[j + threadIdx.x];
icat = ((int)key) & cmask; // Extract the cat id and integer value
ival = ((int)(key >> vshift)) & vmask;
}
jlast = min(31, jc1 - j - 1);
for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
ctt = cattot[icat + ncats * threadIdx.y];
cnew = cold + cnt;
catcnt[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input
updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold);
accumup3(cnt, update, updatet);
ctot += cnt; // Now update the total c and total ci log ci sums
cacc += update;
cact += updatet;
impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input
// if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot);
tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order
tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary
if (threadIdx.x > 0) {
lastival = tmp;
lastimpty = tmpx;
}
if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries
if (lastimpty < minimpty) {
minimpty = lastimpty;
bestival = ival;
}
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop
bestival = __shfl(bestival, jlast);
ctot = __shfl(ctot, jlast);
cacc = __shfl(cacc, jlast);
cact = __shfl(cact, jlast);
lastival = __shfl(ival, jlast);
lastimpty = __shfl(impty, jlast);
}
if (threadIdx.x == 0) {
outv[i] = bestival; // Output the best split feature value
outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index
outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain
outc[i] = imaxcnt;
}
}
}
template<typename T>
__global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {
__shared__ int catcnt[DBSIZE];
__shared__ int cattot[DBSIZE/4];
__shared__ int stott[32];
__shared__ float sacct[32];
__shared__ int slastival[64];
__shared__ int sbestival[32];
__shared__ float sminimpty[32];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
catcnt[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = catcnt[5];
int ishift = catcnt[4] + vshift;
int cmask = (1 << catcnt[5]) - 1;
int vmask = (1 << catcnt[4]) - 1;
int imask = (1 << catcnt[3]) - 1;
__syncthreads();
int i, j, k, h, jc0, jc1, ilast, jlast;
long long key;
int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp;
float update, updatet, acc, acct, impty, minimpty;
for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) {
// Process a group with fixed itree, inode, and ifeat
jc0 = jc[i]; // The range of indices for this group
jc1 = jc[i+1];
__syncthreads();
// Clear the cat counts and totals
for (j = threadIdx.x; j < ncats; j += blockDim.x) {
catcnt[j + threadIdx.y * blockDim.x] = 0;
if (threadIdx.y == 0) cattot[j] = 0;
}
if (threadIdx.y == 0) {
sminimpty[threadIdx.x] = 1e7f;
sbestival[threadIdx.x] = -1;
}
__syncthreads();
// First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group
for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) {
if (j + tid < jc1) { // Read a block of keys and counts
key = keys[j + tid];
cnt = counts[j + tid];
icat = ((int)key) & cmask; // Extract the cat id
atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals
}
}
__syncthreads();
tott = 0; // Compute total count and (c)log(c) for the entire ifeat group
acct = 0;
if (threadIdx.y == 0) {
for (k = 0; k < ncats; k += blockDim.x) {
if (k + threadIdx.x < ncats) {
tcnt = cattot[k + threadIdx.x];
update = T::fupdate(tcnt);
} else {
tcnt = 0;
update = 0;
}
accumup2(tcnt,update);
ilast = min(31, ncats - k - 1);
tcnt = __shfl(tcnt, ilast);
update = __shfl(update, ilast);
tott += tcnt;
acct += update;
}
stott[threadIdx.x] = tott;
sacct[threadIdx.x] = acct;
}
tott = stott[threadIdx.x];
// if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct);
// Main loop, work on blocks of 1024 (ideally)
for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) {
for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row
tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats];
__syncthreads();
if (threadIdx.y == 0) {
catcnt[k + threadIdx.x] = tmp;
} else {
catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0;
}
__syncthreads();
}
if (j + tid < jc1) { // Read a block of keys and counts
key = keys[j + tid];
cnt = counts[j + tid];
icat = ((int)key) & cmask; // Extract the cat id and integer value;
ival = ((int)(key >> vshift)) & vmask;
atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals
}
jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group
if (threadIdx.x == jlast) {
slastival[threadIdx.y + 1] = ival;
}
__syncthreads();
for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts
for (h = 1; h < blockDim.y; h = h + h) {
if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) {
tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y];
}
__syncthreads();
if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) {
catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp;
}
__syncthreads();
}
}
tot = 0; // Local to a yblock (row) of catcnts
acc = 0.0f;
acct = 0.0f;
for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock)
if (k + threadIdx.x < ncats) {
cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats];
update = T::fupdate(cnt);
updatet = T::fupdate(cattot[k + threadIdx.x] - cnt);
} else {
cnt = 0;
update = 0;
updatet = 0;
}
accumup3(cnt,update,updatet);
ilast = min(31, ncats - k - 1);
update = __shfl(update, ilast);
updatet = __shfl(updatet, ilast);
cnt = __shfl(cnt, ilast);
tot += cnt;
acc += update;
acct += updatet;
}
__syncthreads();
// OK, we have everything needed now to compute impurity for the rows in this yblock:
// tot, acc, acct at the end of the block
lastival = -1;
minimpty = 1e7f;
ncnt = -cnt;
for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
ctt = cattot[icat + ncats * threadIdx.y];
cnew = cold + ncnt;
catcnt[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold);
updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold);
accumdown3(ncnt,update,updatet,jlast);
tot += cnt; // Now update the total c and total ci log ci sums
acc += update;
acct += updatet;
impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input
tmp = __shfl_up(ival, 1);
if (threadIdx.x > 0) { // Get the last ival to check for a boundary
lastival = tmp;
} else {
lastival = slastival[threadIdx.y];
}
__syncthreads();
if (tid == 0) {
tmp = slastival[33];
slastival[0] = tmp;
}
__syncthreads();
if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries
if (impty < minimpty) {
minimpty = impty;
bestival = ival;
}
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, jlast);
bestival = __shfl(bestival, jlast);
if (threadIdx.x == 0) {
sminimpty[threadIdx.y] = minimpty;
sbestival[threadIdx.y] = bestival;
}
__syncthreads();
if (threadIdx.y == 0) {
minimpty = sminimpty[threadIdx.x];
bestival = sbestival[threadIdx.x];
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, blockDim.y - 1);
bestival = __shfl(bestival, blockDim.y - 1);
sminimpty[threadIdx.x] = minimpty;
sbestival[threadIdx.x] = bestival;
}
__syncthreads();
}
if (tid == 0) {
outv[i] = bestival; // Output the best split feature value
outf[i] = (int)((key >> ishift) & imask); // Save the feature index
// outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain
outg[i] = T::fresult(sacct[0], tott); // And the impurity gain
}
__syncthreads();
}
}
#else
template<class T>
__global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {}
template<class T>
__global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {}
#endif
int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps, int impType) {
  // Note: it's safe to round ncats up to a multiple of 32, since it's only used to split shmem
int ny = min(32, DBSIZE/ncats/2);
dim3 tdim(32, ny, 1);
int ng = min(64, nnodes*nsamps);
if ((impType & 2) == 0) {
if ((impType & 1) == 0) {
hipLaunchKernelGGL(( __minImpuritya<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
} else {
hipLaunchKernelGGL(( __minImpuritya<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
}
} else {
if ((impType & 1) == 0) {
hipLaunchKernelGGL(( __minImpurityb<entImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
} else {
hipLaunchKernelGGL(( __minImpurityb<giniImpty>), dim3(ng),dim3(tdim), 0, 0, keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
}
}
fflush(stdout);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
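/* --- illustrative sketch, not part of the original BIDMach kernels ---
 * the entImpty / giniImpty functors accumulate acc = sum_i f(c_i) over the
 * per-category counts of a node and then map (acc, n) to an impurity:
 * entropy uses f(c) = c*log(c) and impurity = log(n) - acc/n, gini uses
 * f(c) = c^2 and impurity = 1 - acc/n^2. the plain host routine below mirrors
 * the entropy contract and could be used to spot-check one node on small data;
 * the function name is made up, and logf is assumed to be reachable through the
 * existing includes. */
static inline float hostEntropyImpurity(const int *catCounts, int ncats) {
  int n = 0;
  float acc = 0.0f;
  for (int i = 0; i < ncats; i++) {
    int c = catCounts[i];
    n += c;
    if (c > 1) acc += (float)c * logf((float)c);   // f(c) = c*log(c), log(1) = 0
  }
  if (n < 1) n = 1;
  return logf((float)n) - acc / (float)n;          // impurity = log(n) - acc/n
}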
__global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) {
__shared__ int dbuff[1024];
int i, j, iv, lasti;
int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32))));
int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1))));
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid == 0 && blockIdx.x == 0) {
jc[0] = 0;
}
__syncthreads();
lasti = 0x7fffffff;
for (i = imin; i <= imax; i += blockDim.x * blockDim.y) {
iv = njc;
if (i + tid < imax) {
iv = (int)(keys[i + tid] >> shift);
dbuff[tid] = iv;
}
__syncthreads();
if (i + tid < imax || i + tid == n) {
if (tid > 0) lasti = dbuff[tid - 1];
if (iv > lasti) {
for (j = lasti+1; j <= iv; j++) {
jc[j] = i + tid;
}
}
if (tid == 0) {
lasti = dbuff[blockDim.x * blockDim.y - 1];
}
}
__syncthreads();
}
}
int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) {
int ny = min(32, 1 + (n-1)/32);
dim3 tdim(32, ny, 1);
int ng = min(64, 1+n/32/ny);
hipLaunchKernelGGL(( __findBoundaries), dim3(ng),dim3(tdim), 0, 0, keys, jc, n, njc, shift);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
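/* --- illustrative sketch, not part of the original BIDMach kernels ---
 * findBoundaries writes, for every group id g = (key >> shift) over the sorted
 * keys, the position jc[g] of the first key in group g, plus a final entry
 * jc[njc] = n (a CSR-style offset array). an equivalent sequential host version,
 * usable as a reference check on small inputs, is sketched below; the function
 * name is made up, and jc is assumed to hold njc+1 entries. */
static void hostFindBoundaries(const long long *keys, int *jc, int n, int njc, int shift) {
  int prev = -1;                                        // group id of the previous key
  for (int i = 0; i < n; i++) {
    int g = (int)(keys[i] >> shift);
    for (int j = prev + 1; j <= g; j++) jc[j] = i;      // first index of each newly opened group
    prev = g;
  }
  for (int j = prev + 1; j <= njc; j++) jc[j] = n;      // remaining groups are empty
}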
template<typename T>
__global__ void mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) {
__shared__ T dbuff[1024];
int i, j, itodo, doit, total;
T thisval, lastval, endval, tmp;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int imin = (int)(((long long)n) * blockIdx.x / gridDim.x);
int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x);
total = 0;
if (tid == 0) {
lastval = keys[imin];
ispine[blockIdx.x] = lastval;
}
for (i = imin; i < imax; i += blockDim.x * blockDim.y) {
itodo = min(1024, imax - i);
__syncthreads();
if (i + tid < imax) {
thisval = keys[i + tid];
dbuff[tid] = thisval;
}
__syncthreads();
if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1];
if (tid == 0) endval = dbuff[itodo-1];
__syncthreads();
if (i + tid < imax) {
dbuff[tid] = (thisval == lastval) ? 0 : 1;
}
__syncthreads();
for (j = 1; j < itodo; j = j << 1) {
doit = tid + j < itodo && (tid & ((j << 1)-1)) == 0;
if (doit) {
tmp = dbuff[tid] + dbuff[tid + j];
}
__syncthreads();
if (doit) {
dbuff[tid] = tmp;
}
__syncthreads();
}
if (tid == 0) {
total += dbuff[0];
lastval = endval;
}
}
if (tid == 0) {
cspine[blockIdx.x] = total;
vspine[blockIdx.x] = endval;
}
}
template<typename T>
__global__ void fixSpine(int *cspine, T *ispine, T *vspine, int n) {
__shared__ int counts[1024];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int i, tmp;
if (tid < n) {
counts[tid] = cspine[tid];
}
__syncthreads();
if (tid < n - 1) {
if (ispine[tid + 1] != vspine[tid]) {
counts[tid + 1] += 1;
}
}
if (tid == 0) {
counts[0] += 1;
}
__syncthreads();
for (i = 1; i < n; i = i << 1) {
if (tid >= i) {
tmp = counts[tid - i];
}
__syncthreads();
if (tid >= i) {
counts[tid] += tmp;
}
__syncthreads();
}
if (tid < n) {
cspine[tid] = counts[tid];
}
}
template<typename T>
__global__ void mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) {
__shared__ T dbuff[1024];
__shared__ T obuff[2048];
__shared__ int ocnts[2048];
int i, j, itodo, doit, thiscnt, lastcnt, obase, odone, total;
T thisval, lastval, endval, tmp;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int imin = (int)(((long long)n) * blockIdx.x / gridDim.x);
int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x);
odone = cspine[blockIdx.x];
obase = 0;
if (tid == 0) {
lastval = keys[imin];
}
for (i = imin; i < imax; i += blockDim.x * blockDim.y) {
itodo = min(1024, imax - i);
__syncthreads();
if (i + tid < imax) { // Copy a block of input data into dbuff
thisval = keys[i + tid];
dbuff[tid] = thisval;
}
__syncthreads();
if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1];
if (tid == 0) endval = dbuff[itodo-1];
__syncthreads();
if (i + tid < imax) {
ocnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index
}
__syncthreads();
for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key
doit = tid + j < itodo && (tid & ((j << 1)-1)) == 0;
if (doit) {
tmp = ocnts[tid] + ocnts[tid + j];
}
__syncthreads();
if (doit) {
ocnts[tid] = tmp;
}
__syncthreads();
}
total = ocnts[itodo-1];
if (tid > 0 && i + tid < imax) { // Find where the index changes
thiscnt = ocnts[tid];
lastcnt = ocnts[tid-1];
}
__syncthreads();
if (tid > 0 && i + tid < imax) { // and save the key/counts there in buffer memory
if (thiscnt > lastcnt) {
obuff[obase + thiscnt] = thisval;
ocnts[obase + thiscnt] = i + tid;
}
}
__syncthreads();
obase += total;
if (obase > 1024) { // Buffer full so flush it
okeys[odone+tid] = obuff[tid];
counts[odone+tid] = ocnts[tid] - ocnts[tid-1]; // Need to fix wraparound
odone += 1024;
}
__syncthreads();
if (obase > 1024) { // Copy top to bottom of buffer
obuff[tid] = obuff[tid+1024];
ocnts[tid] = ocnts[tid+1024];
}
obase -= 1024;
}
if (tid < obase) { // Flush out anything that's left
okeys[odone+tid] = obuff[tid];
counts[odone+tid] = ocnts[tid] - ocnts[tid-1]; // Need to fix wraparound
}
}
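/* --- illustrative sketch, not part of the original BIDMach kernels ---
 * mergeIndsP1 / fixSpine / mergeIndsP2 above form a three-stage pipeline that
 * collapses runs of equal (sorted) keys into unique keys plus run counts: P1
 * counts key changes per block and records each block's first/last key, fixSpine
 * turns the per-block counts into output offsets, and P2 writes the merged keys
 * and counts. a plausible host driver is sketched below; the call order, launch
 * geometry, and the assumptions that nblocks <= 1024 and that all spine buffers
 * hold nblocks entries are illustrative only. */
static int exampleMergeInds(long long *keys, long long *okeys, int *counts, int n,
int *cspine, long long *ispine, long long *vspine, int nblocks) {
dim3 bdim(32, 32, 1);
hipLaunchKernelGGL(( mergeIndsP1<long long>), dim3(nblocks),dim3(bdim), 0, 0, keys, cspine, ispine, vspine, n);
hipLaunchKernelGGL(( fixSpine<long long>), dim3(1),dim3(bdim), 0, 0, cspine, ispine, vspine, nblocks);
hipLaunchKernelGGL(( mergeIndsP2<long long>), dim3(nblocks),dim3(bdim), 0, 0, keys, okeys, counts, cspine, n);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}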
| 0025b9d4ceee8813948ef898405b923831faddfa.cu | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
static const unsigned int c1 = 0xcc9e2d51;
static const unsigned int c2 = 0x1b873593;
static const unsigned int r1 = 15;
static const unsigned int r2 = 13;
static const unsigned int m = 5;
static const unsigned int n = 0xe6546b64;
__device__ inline unsigned int h1(unsigned int k, unsigned int hash) {
k *= c1;
k = (k << r1) | (k >> (32-r1));
k *= c2;
hash ^= k;
hash = ((hash << r2) | (hash >> (32-r2)) * m) + n;
return hash;
}
__device__ inline unsigned int mmhash(unsigned int v1, unsigned int v2, unsigned int v3, unsigned int mod, unsigned int seed)
{
unsigned int hash = seed;
hash = h1(v1, hash);
hash = h1(v2, hash);
hash = h1(v3, hash);
hash ^= (hash >> 16);
hash *= 0x85ebca6b;
hash ^= (hash >> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >> 16);
return (hash % mod);
}
#define DBSIZE (8*1024)
__global__ void __treePack(int *idata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens,
int nrows, int ncols, int ntrees, int nsamps) {
__shared__ int dbuff[DBSIZE];
__shared__ int fl[32];
int j, k, ic, ival;
int seed = 45123421;
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
fl[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = fl[5];
int ishift = fl[4] + vshift;
int jshift = fl[3] + ishift;
int nshift = fl[2] + jshift;
int tshift = fl[1] + nshift;
int cmask = (1 << fl[5]) - 1;
int vmask = (1 << fl[4]) - 1;
int imask = (1 << fl[3]) - 1;
int jmask = (1 << fl[2]) - 1;
int nmask = (1 << fl[1]) - 1;
int tmask = (1 << fl[0]) - 1;
int nc = (DBSIZE / nrows);
int itree = threadIdx.y;
int jfeat = threadIdx.x;
for (int i = nc * blockIdx.x; i < ncols; i += nc * gridDim.x) {
int ctodo = min(nc, ncols - i);
for (j = tid; j < nrows * ctodo; j += blockDim.x*blockDim.y) {
dbuff[j] = idata[j + i * nrows];
}
__syncthreads();
for (j = i; j < i + ctodo; j++) {
for (itree = threadIdx.y; itree < ntrees; itree += blockDim.y) {
int inode = treenodes[itree + j * ntrees];
int ifeat = mmhash(itree, inode, jfeat, nrows, seed);
long long hdr = (((long long)(tmask & itree)) << tshift) | (((long long)(nmask & inode)) << nshift) |
(((long long)(jmask & jfeat)) << jshift) | (((long long)(imask & ifeat)) << ishift) ;
for (k = jc[j]; k < jc[j+1]; k++) {
ic = icats[k];
if (jfeat < nsamps) {
ival = dbuff[ifeat + (j - i) * nrows];
out[jfeat + nsamps * (itree + ntrees * k)] = hdr | (((long long)(vmask & ival)) << vshift) | ((long long)(ic & cmask));
}
}
}
}
__syncthreads();
}
}
int treePack(int *fdata, int *treenodes, int *icats, int *jc, long long *out, int *fieldlens, int nrows, int ncols, int ntrees, int nsamps) {
int ntx = 32 * (1 + (nsamps - 1)/32);
int nty = min(1024 / ntx, ntrees);
dim3 bdim(ntx, nty, 1);
int nb = min(32, 1 + (ncols-1)/32);
__treePack<<<nb,bdim>>>(fdata, treenodes, icats, jc, out, fieldlens, nrows, ncols, ntrees, nsamps);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
class entImpty {
public:
static __device__ inline float fupdate(int v) { return (float)v * logf((float)max(1, v)); }
static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return logf(vs) - vacc / vs; }
};
class giniImpty {
public:
static __device__ inline float fupdate(int v) { return (float)v * (float)v; }
static __device__ inline float fresult(float vacc, int vsum) { float vs = (float)max(1, vsum); return 1.0f - vacc / (vs*vs); }
};
#if __CUDA_ARCH__ >= 300
__device__ inline void accumup2(int &cnt, float &update) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(update, h);
int tmp = __shfl_up(cnt, h);
if (threadIdx.x >=h) {
update += tmpx;
cnt += tmp;
}
}
}
__device__ inline void accumup3(int &cnt, float &update, float &updatet) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(update, h);
float tmpy = __shfl_up(updatet, h);
int tmp = __shfl_up(cnt, h);
if (threadIdx.x >=h) {
update += tmpx;
updatet += tmpy;
cnt += tmp;
}
}
}
__device__ inline void accumdown3(int &cnt, float &update, float &updatet, int bound) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_down(update, h);
float tmpy = __shfl_down(updatet, h);
int tmp = __shfl_down(cnt, h);
if (threadIdx.x + h <= bound) {
update += tmpx;
updatet += tmpy;
cnt += tmp;
}
}
}
__device__ inline void minup2(float &impty, int &ival) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
float tmpx = __shfl_up(impty, h);
int tmp = __shfl_up(ival, h);
if (threadIdx.x >= h && tmpx < impty) {
impty = tmpx;
ival = tmp;
}
}
}
__device__ inline void maxup2(int &v, int &indx) {
#pragma unroll
for (int h = 1; h < 32; h = h + h) {
int tmpv = __shfl_up(v, h);
int tmpi = __shfl_up(indx, h);
if (threadIdx.x >= h && tmpv > v) {
v = tmpv;
indx = tmpi;
}
}
}
template<typename T>
__global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {
__shared__ int catcnt[DBSIZE/2];
__shared__ int cattot[DBSIZE/2];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
catcnt[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = catcnt[5];
int ishift = catcnt[4] + vshift;
int cmask = (1 << catcnt[5]) - 1;
int vmask = (1 << catcnt[4]) - 1;
int imask = (1 << catcnt[3]) - 1;
__syncthreads();
int i, j, k, jc0, jc1, jlast;
long long key;
int cold, ctot, ctt, ctotall, cnew, cnt, ival, icat, lastival, bestival, tmp, maxcnt, imaxcnt;
float update, updatet, cacc, cact, caccall, impty, minimpty, lastimpty, tmpx;
for (i = threadIdx.y + blockDim.y * blockIdx.x; i < nnodes*nsamps; i += blockDim.y * gridDim.x) {
// Process a group with fixed itree, inode, and ifeat
jc0 = jc[i]; // The range of indices for this group
jc1 = jc[i+1];
__syncthreads();
// Clear the cat counts for this group
for (j = tid; j < DBSIZE/2; j += blockDim.x * blockDim.y) {
catcnt[j] = 0;
cattot[j] = 0;
}
__syncthreads();
// First pass gets counts for each category and the (ci)log(ci) sum for this block
ctot = 0;
cacc = 0.0f;
maxcnt = -1;
imaxcnt = -1;
for (j = jc0; j < jc1; j += blockDim.x) {
if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts
key = keys[j + threadIdx.x]; // Each (x) thread handles a different input
cnt = counts[j + threadIdx.x];
icat = ((int)key) & cmask; // Extract the cat id and integer value
}
jlast = min(31, jc1 - j - 1);
for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = cattot[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
cnew = cold + cnt;
cattot[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold);
accumup2(cnt,update);
ctot += cnt; // Now update the total c and total ci log ci sums
cacc += update;
ctot = __shfl(ctot, jlast);
cacc = __shfl(cacc, jlast);
if (cnew > maxcnt) { // Compute and distribute the max cnt
maxcnt = cnew;
imaxcnt = icat;
}
maxup2(maxcnt, imaxcnt);
maxcnt = __shfl(maxcnt, jlast);
imaxcnt = __shfl(imaxcnt, jlast);
}
__syncthreads();
// if (threadIdx.x == 0 && i < 32) printf("cuda %d %d %f\n", i, ctot, cacc);
// Second pass to compute impurity at every input point
caccall = cacc; // Save the total count and (ci)log(ci) sum
cact = cacc;
ctotall = ctot;
ctot = 0;
cacc = 0.0f;
lastival = -1;
lastimpty = 1e7f;
minimpty = 1e7f;
for (j = jc0; j < jc1; j += blockDim.x) {
if (j + threadIdx.x < jc1) { // Read a block of (32) keys and counts
key = keys[j + threadIdx.x]; // Each (x) thread handles a different input
cnt = counts[j + threadIdx.x];
icat = ((int)key) & cmask; // Extract the cat id and integer value
ival = ((int)(key >> vshift)) & vmask;
}
jlast = min(31, jc1 - j - 1);
for (k = 0; k <= jlast; k++) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
ctt = cattot[icat + ncats * threadIdx.y];
cnew = cold + cnt;
catcnt[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold); // Compute the impurity updates for this input
updatet = T::fupdate(ctt-cnew) - T::fupdate(ctt-cold);
accumup3(cnt, update, updatet);
ctot += cnt; // Now update the total c and total ci log ci sums
cacc += update;
cact += updatet;
impty = T::fresult(cacc, ctot) + T::fresult(cact, ctotall-ctot); // And the impurity for this input
// if (i == 0) printf("cuda pos %d impty %f icat %d cnts %d %d cacc %f %d\n", j + threadIdx.x, impty, icat, cold, cnew, cacc, ctot);
tmp = __shfl_up(ival, 1); // Need the last impurity and ival in order
tmpx = __shfl_up(impty, 1); // to restrict the partition feature to a value boundary
if (threadIdx.x > 0) {
lastival = tmp;
lastimpty = tmpx;
}
if (ival == lastival) lastimpty = 1e7f; // Eliminate values which are not at value boundaries
if (lastimpty < minimpty) {
minimpty = lastimpty;
bestival = ival;
}
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, jlast); // Carefully copy the last active thread to all threads, needed outside this loop
bestival = __shfl(bestival, jlast);
ctot = __shfl(ctot, jlast);
cacc = __shfl(cacc, jlast);
cact = __shfl(cact, jlast);
lastival = __shfl(ival, jlast);
lastimpty = __shfl(impty, jlast);
}
if (threadIdx.x == 0) {
outv[i] = bestival; // Output the best split feature value
outf[i] = ((int)(key >> ishift)) & imask; // Save the feature index
outg[i] = T::fresult(caccall, ctotall) - minimpty; // And the impurity gain
outc[i] = imaxcnt;
}
}
}
template<typename T>
__global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {
__shared__ int catcnt[DBSIZE];
__shared__ int cattot[DBSIZE/4];
__shared__ int stott[32];
__shared__ float sacct[32];
__shared__ int slastival[64];
__shared__ int sbestival[32];
__shared__ float sminimpty[32];
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid < 6) {
catcnt[tid] = fieldlens[tid];
}
__syncthreads();
int vshift = catcnt[5];
int ishift = catcnt[4] + vshift;
int cmask = (1 << catcnt[5]) - 1;
int vmask = (1 << catcnt[4]) - 1;
int imask = (1 << catcnt[3]) - 1;
__syncthreads();
int i, j, k, h, jc0, jc1, ilast, jlast;
long long key;
int cold, tot, ctt, tott, cnew, cnt, ncnt, tcnt, ival, icat, lastival, bestival, tmp;
float update, updatet, acc, acct, impty, minimpty;
for (i = blockIdx.x; i < nnodes*nsamps; i += gridDim.x) {
// Process a group with fixed itree, inode, and ifeat
jc0 = jc[i]; // The range of indices for this group
jc1 = jc[i+1];
__syncthreads();
// Clear the cat counts and totals
for (j = threadIdx.x; j < ncats; j += blockDim.x) {
catcnt[j + threadIdx.y * blockDim.x] = 0;
if (threadIdx.y == 0) cattot[j] = 0;
}
if (threadIdx.y == 0) {
sminimpty[threadIdx.x] = 1e7f;
sbestival[threadIdx.x] = -1;
}
__syncthreads();
// First pass gets counts for each category and the (ci)log(ci) sum for this entire ifeat group
for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) {
if (j + tid < jc1) { // Read a block of keys and counts
key = keys[j + tid];
cnt = counts[j + tid];
icat = ((int)key) & cmask; // Extract the cat id
atomicAdd(&cattot[icat + threadIdx.y * ncats], cnt); // Update count totals
}
}
__syncthreads();
tott = 0; // Compute total count and (c)log(c) for the entire ifeat group
acct = 0;
if (threadIdx.y == 0) {
for (k = 0; k < ncats; k += blockDim.x) {
if (k + threadIdx.x < ncats) {
tcnt = cattot[k + threadIdx.x];
update = T::fupdate(tcnt);
} else {
tcnt = 0;
update = 0;
}
accumup2(tcnt,update);
ilast = min(31, ncats - k - 1);
tcnt = __shfl(tcnt, ilast);
update = __shfl(update, ilast);
tott += tcnt;
acct += update;
}
stott[threadIdx.x] = tott;
sacct[threadIdx.x] = acct;
}
tott = stott[threadIdx.x];
// if (tid == 0 && i < 32) printf("cuda %d %d %f\n", i, tott, acct);
// Main loop, work on blocks of 1024 (ideally)
for (j = jc0; j < jc1; j += blockDim.x * blockDim.x) {
for (k = 0; k < ncats; k += blockDim.x) { // copy cumcounts from last row of last iteration to the first row
tmp = catcnt[k + threadIdx.x + (blockDim.y -1) * ncats];
__syncthreads();
if (threadIdx.y == 0) {
catcnt[k + threadIdx.x] = tmp;
} else {
catcnt[k + threadIdx.x + threadIdx.y * ncats] = 0;
}
__syncthreads();
}
if (j + tid < jc1) { // Read a block of keys and counts
key = keys[j + tid];
cnt = counts[j + tid];
icat = ((int)key) & cmask; // Extract the cat id and integer value;
ival = ((int)(key >> vshift)) & vmask;
atomicAdd(&catcnt[icat + threadIdx.y * ncats], cnt); // Update count totals
}
jlast = min(31, jc1 - j - threadIdx.y * 32 - 1); // Save the last value in this group
if (threadIdx.x == jlast) {
slastival[threadIdx.y + 1] = ival;
}
__syncthreads();
for (k = 0; k < ncats; k += blockDim.x) { // Form the cumsum along columns of catcnts
for (h = 1; h < blockDim.y; h = h + h) {
if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) {
tmp = catcnt[k + threadIdx.x + ncats * threadIdx.y];
}
__syncthreads();
if (k + threadIdx.x < ncats && blockIdx.y + h < blockDim.y) {
catcnt[k + threadIdx.x + ncats * (threadIdx.y + h)] += tmp;
}
__syncthreads();
}
}
tot = 0; // Local to a yblock (row) of catcnts
acc = 0.0f;
acct = 0.0f;
for (k = 0; k < ncats; k += blockDim.x) { // Now sum within a row (yblock)
if (k + threadIdx.x < ncats) {
cnt = catcnt[k + threadIdx.x + threadIdx.y * ncats];
update = T::fupdate(cnt);
updatet = T::fupdate(cattot[k + threadIdx.x] - cnt);
} else {
cnt = 0;
update = 0;
updatet = 0;
}
accumup3(cnt,update,updatet);
ilast = min(31, ncats - k - 1);
update = __shfl(update, ilast);
updatet = __shfl(updatet, ilast);
cnt = __shfl(cnt, ilast);
tot += cnt;
acc += update;
acct += updatet;
}
__syncthreads();
// OK, we have everything needed now to compute impurity for the rows in this yblock:
// tot, acc, acct at the end of the block
lastival = -1;
minimpty = 1e7f;
ncnt = -cnt;
for (k = jlast; k >= 0; k--) { // Sequentially update counts so that each thread
if (threadIdx.x == k) { // in this warp gets the old and new counts
cold = catcnt[icat + ncats * threadIdx.y]; // i.e. data for item k is in thread k
ctt = cattot[icat + ncats * threadIdx.y];
cnew = cold + ncnt;
catcnt[icat + ncats * threadIdx.y] = cnew;
}
}
update = T::fupdate(cnew) - T::fupdate(cold);
updatet = T::fupdate(ctt - cnew) - T::fupdate(ctt - cold);
accumdown3(ncnt,update,updatet,jlast);
tot += cnt; // Now update the total c and total ci log ci sums
acc += update;
acct += updatet;
impty = T::fresult(acc, tot) + T::fresult(acct, tott - tot); // And the impurity for this input
tmp = __shfl_up(ival, 1);
if (threadIdx.x > 0) { // Get the last ival to check for a boundary
lastival = tmp;
} else {
lastival = slastival[threadIdx.y];
}
__syncthreads();
if (tid == 0) {
tmp = slastival[33];
slastival[0] = tmp;
}
__syncthreads();
if (ival == lastival) impty = 1e7f; // Eliminate values which are not at value boundaries
if (impty < minimpty) {
minimpty = impty;
bestival = ival;
}
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, jlast);
bestival = __shfl(bestival, jlast);
if (threadIdx.x == 0) {
sminimpty[threadIdx.y] = minimpty;
sbestival[threadIdx.y] = bestival;
}
__syncthreads();
if (threadIdx.y == 0) {
minimpty = sminimpty[threadIdx.x];
bestival = sbestival[threadIdx.x];
minup2(minimpty,bestival);
minimpty = __shfl(minimpty, blockDim.y - 1);
bestival = __shfl(bestival, blockDim.y - 1);
sminimpty[threadIdx.x] = minimpty;
sbestival[threadIdx.x] = bestival;
}
__syncthreads();
}
if (tid == 0) {
outv[i] = bestival; // Output the best split feature value
outf[i] = (int)((key >> ishift) & imask); // Save the feature index
// outg[i] = T::fresult(sacct[0], tott) - minimpty; // And the impurity gain
outg[i] = T::fresult(sacct[0], tott); // And the impurity gain
}
__syncthreads();
}
}
#else
template<class T>
__global__ void __minImpuritya(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {}
template<class T>
__global__ void __minImpurityb(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps) {}
#endif
int minImpurity(long long *keys, int *counts, int *outv, int *outf, float *outg, int *outc, int *jc, int *fieldlens,
int nnodes, int ncats, int nsamps, int impType) {
// Note: it's safe to round ncats up to a multiple of 32, since it's only used to split shmem
int ny = min(32, DBSIZE/ncats/2);
dim3 tdim(32, ny, 1);
int ng = min(64, nnodes*nsamps);
if ((impType & 2) == 0) {
if ((impType & 1) == 0) {
__minImpuritya<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
} else {
__minImpuritya<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
}
} else {
if ((impType & 1) == 0) {
__minImpurityb<entImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
} else {
__minImpurityb<giniImpty><<<ng,tdim>>>(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps);
}
}
fflush(stdout);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
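// Dispatch summary: bit 0 of impType picks the impurity functor (0 = entImpty, 1 = giniImpty)
// and bit 1 picks the kernel variant (0 = __minImpuritya, 1 = __minImpurityb), as the nested
// branches above show. A hypothetical call using entropy with variant a would pass impType = 0:
// minImpurity(keys, counts, outv, outf, outg, outc, jc, fieldlens, nnodes, ncats, nsamps, 0);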
__global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) {
__shared__ int dbuff[1024];
int i, j, iv, lasti;
int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32))));
int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1))));
int tid = threadIdx.x + blockDim.x * threadIdx.y;
if (tid == 0 && blockIdx.x == 0) {
jc[0] = 0;
}
__syncthreads();
lasti = 0x7fffffff;
for (i = imin; i <= imax; i += blockDim.x * blockDim.y) {
iv = njc;
if (i + tid < imax) {
iv = (int)(keys[i + tid] >> shift);
dbuff[tid] = iv;
}
__syncthreads();
if (i + tid < imax || i + tid == n) {
if (tid > 0) lasti = dbuff[tid - 1];
if (iv > lasti) {
for (j = lasti+1; j <= iv; j++) {
jc[j] = i + tid;
}
}
if (tid == 0) {
lasti = dbuff[blockDim.x * blockDim.y - 1];
}
}
__syncthreads();
}
}
int findBoundaries(long long *keys, int *jc, int n, int njc, int shift) {
int ny = min(32, 1 + (n-1)/32);
dim3 tdim(32, ny, 1);
int ng = min(64, 1+n/32/ny);
__findBoundaries<<<ng,tdim>>>(keys, jc, n, njc, shift);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
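// __findBoundaries builds a CSR-style offset array: jc[g] receives the first position whose
// group id (keys[i] >> shift) equals g, jc[0] is pinned to 0, and trailing groups are clamped
// to n. A small worked example with hypothetical values (njc = 3): if keys >> shift yields
// [0, 0, 1, 1, 1, 2], then jc = [0, 2, 5, 6].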
template<typename T>
__global__ void mergeIndsP1(T *keys, int *cspine, T *ispine, T *vspine, int n) {
__shared__ T dbuff[1024];
int i, j, itodo, doit, total;
T thisval, lastval, endval, tmp;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int imin = (int)(((long long)n) * blockIdx.x / gridDim.x);
int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x);
total = 0;
if (tid == 0) {
lastval = keys[imin];
ispine[blockIdx.x] = lastval;
}
for (i = imin; i < imax; i += blockDim.x * blockDim.y) {
itodo = min(1024, imax - i);
__syncthreads();
if (i + tid < imax) {
thisval = keys[i + tid];
dbuff[tid] = thisval;
}
__syncthreads();
if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1];
if (tid == 0) endval = dbuff[itodo-1];
__syncthreads();
if (i + tid < imax) {
dbuff[tid] = (thisval == lastval) ? 0 : 1;
}
__syncthreads();
for (j = 1; j < itodo; j = j << 1) {
doit = tid + j < itodo && (tid & ((j << 1)-1)) == 0;
if (doit) {
tmp = dbuff[tid] + dbuff[tid + j];
}
__syncthreads();
if (doit) {
dbuff[tid] = tmp;
}
__syncthreads();
}
if (tid == 0) {
total += dbuff[0];
lastval = endval;
}
}
if (tid == 0) {
cspine[blockIdx.x] = total;
vspine[blockIdx.x] = endval;
}
}
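// mergeIndsP1 is the counting pass of a segmented merge: each block flags positions where the
// key changes, reduces those flags with the shared-memory tree above, and writes the number of
// transitions it saw to cspine[blockIdx.x], plus its first key (ispine) and last key (vspine)
// so that fixSpine can account for runs that straddle block boundaries.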
template<typename T>
__global__ void fixSpine(int *cspine, T *ispine, T *vspine, int n) {
__shared__ int counts[1024];
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int i, tmp;
if (tid < n) {
counts[tid] = cspine[tid];
}
__syncthreads();
if (tid < n - 1) {
if (ispine[tid + 1] != vspine[tid]) {
counts[tid + 1] += 1;
}
}
if (tid == 0) {
counts[0] += 1;
}
__syncthreads();
for (i = 1; i < n; i = i << 1) {
if (tid >= i) {
tmp = counts[tid - i];
}
__syncthreads();
if (tid >= i) {
counts[tid] += tmp;
}
__syncthreads();
}
if (tid < n) {
cspine[tid] = counts[tid];
}
}
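// fixSpine turns the per-block transition counts into output offsets: it adds one run wherever
// a block's first key (ispine) differs from the previous block's last key (vspine), adds one
// for the very first run, then performs a Hillis-Steele inclusive prefix sum in shared memory,
// so cspine[b] ends up holding the number of output records produced by blocks 0..b
// (this assumes n <= 1024, the size of the shared counts array).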
template<typename T>
__global__ void mergeIndsP2(T *keys, T *okeys, int *counts, int *cspine, int n) {
__shared__ T dbuff[1024];
__shared__ T obuff[2048];
__shared__ int ocnts[2048];
int i, j, itodo, doit, thiscnt, lastcnt, obase, odone, total;
T thisval, lastval, endval, tmp;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int imin = (int)(((long long)n) * blockIdx.x / gridDim.x);
int imax = (int)(((long long)n) * (blockIdx.x + 1) / gridDim.x);
odone = cspine[blockIdx.x];
obase = 0;
if (tid == 0) {
lastval = keys[imin];
}
for (i = imin; i < imax; i += blockDim.x * blockDim.y) {
itodo = min(1024, imax - i);
__syncthreads();
if (i + tid < imax) { // Copy a block of input data into dbuff
thisval = keys[i + tid];
dbuff[tid] = thisval;
}
__syncthreads();
if (tid > 0 && i + tid < imax) lastval = dbuff[tid - 1];
if (tid == 0) endval = dbuff[itodo-1];
__syncthreads();
if (i + tid < imax) {
ocnts[tid] = (thisval == lastval) ? 0 : 1; // Bit that indicates a change of index
}
__syncthreads();
for (j = 1; j < itodo; j = j << 1) { // Cumsum of these bits = where to put key
doit = tid + j < itodo && (tid & ((j << 1)-1)) == 0;
if (doit) {
tmp = ocnts[tid] + ocnts[tid + j];
}
__syncthreads();
if (doit) {
ocnts[tid] = tmp;
}
__syncthreads();
}
total = ocnts[itodo-1];
if (tid > 0 && i + tid < imax) { // Find where the index changes
thiscnt = ocnts[tid];
lastcnt = ocnts[tid-1];
}
__syncthreads();
if (tid > 0 && i + tid < imax) { // and save the key/counts there in buffer memory
if (thiscnt > lastcnt) {
obuff[obase + thiscnt] = thisval;
ocnts[obase + thiscnt] = i + tid;
}
}
__syncthreads();
obase += total;
if (obase > 1024) { // Buffer full so flush it
okeys[odone+tid] = obuff[tid];
counts[odone+tid] = ocnts[tid] - ocnts[tid-1]; // Need to fix wraparound
odone += 1024;
}
__syncthreads();
if (obase > 1024) { // Copy top to bottom of buffer
obuff[tid] = obuff[tid+1024];
ocnts[tid] = ocnts[tid+1024];
}
obase -= 1024;
}
if (tid < obase) { // Flush out anything that's left
okeys[odone+tid] = obuff[tid];
counts[odone+tid] = ocnts[tid] - ocnts[tid-1]; // Need to fix wraparound
}
}
|
91987636b9027f8de0117b2241d9818d95d0ae4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "load_balanced_search.cuh"
__device__ int __forceinline__ get_next_power_of_2(int x)
{
if(x < 0)
{
return 0;
}
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x>> 16;
return x+1;
}
//Specialized binary search for the LBS problem: we want to return the greatest index in array whose value is less than the key
__device__ int binary_search(int *array, int array_size, int key, int low = 0)
{
//int low = 0;
int high = array_size-1;
while(low <= high)
{
int mid = low + ((high - low) / 2);
int midVal = array[mid];
if(midVal < key)
{
low = mid + 1;
}
else if(midVal > key)
{
high = mid - 1;
}
else
{
/*while(mid < high)
{
if(array[mid+1] == array[mid])
{
mid++;
}
else
{
break;
}
}*/
return mid; //guarantee O(log n), and use the "normal method" to ensure the right answer is found
}
}
return high; //key not found - return the lower key since we want the greatest index *less* than the key
}
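// Worked example with hypothetical values: for array = {0, 2, 5} (exclusive prefix sums) and
// key = 3, the loop ends with low = 2 > high = 1 and returns 1, the greatest index whose value
// is still below the key; an exact match returns its own index through the early return above.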
//TODO: Scale to larger sets of edges, extend to blocks, etc.
//More efficient for threads to process consecutive elements in results rather than stride-32 elements?
//Use a warp to extract edges that need to be traversed
//Does the edge frontier size need to be stored to global memory? It can be obtained directly from the scan and edge counts? If it doesn't affect performance much then it's worth keeping I guess.
__device__ void load_balance_search_warp(const int vertex_frontier_size, int *edge_frontier_size, const int *edge_counts, int *scanned_edges, int *result)
{
__shared__ typename hipcub::WarpScan<int>::TempStorage temp_storage;
int total_edges = 0;
//Ensure all threads in the warp execute WarpScan and get the value of total_edges
int vertex_frontier_rounded = get_next_power_of_2(vertex_frontier_size);
if(vertex_frontier_rounded < WARP_SIZE)
{
vertex_frontier_rounded = WARP_SIZE; //Must be at least the size of the warp for the syncthreads in the next loop to work correctly
}
for(int i=getLaneId(); i<vertex_frontier_rounded; i+=WARP_SIZE)
{
int local_count = i < vertex_frontier_size ? edge_counts[i] : 0;
int current_edges;
hipcub::WarpScan<int>(temp_storage).ExclusiveSum(local_count,scanned_edges[i],current_edges);
__syncthreads(); //Needed for reuse of WarpScan
if((i != getLaneId()) && (i < vertex_frontier_size))
{
scanned_edges[i] += total_edges; //Add previous number of edges for subsequent loop iterations
}
total_edges += current_edges;
}
if(getLaneId() == 0)
{
edge_frontier_size[0] = scanned_edges[vertex_frontier_size-1]+edge_counts[vertex_frontier_size-1];
}
int ind = 0;
for(int i=getLaneId(); i<total_edges; i+=WARP_SIZE)
{
while(ind < vertex_frontier_size && i >= scanned_edges[ind])
{
ind++;
}
if(ind >= vertex_frontier_size) //boundary condition
{
result[i] = vertex_frontier_size-1;
}
else
{
result[i] = ind-1;
}
}
//This is actually way slower than the naive approach, at least for the inputs I've tested so far. Perhaps that input isn't large enough?
/*for(int i=getLaneId(); i<total_edges; i+=WARP_SIZE)
{
if(i != getLaneId())
{
result[i] = binary_search(scanned_edges,vertex_frontier_size,i,result[i-WARP_SIZE]);
}
else
{
result[i] = binary_search(scanned_edges,vertex_frontier_size,i);
}
}*/
}
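// Output semantics: result[e] is the index of the frontier vertex that owns edge e, recovered
// from the exclusive prefix sum of edge_counts. Worked example with hypothetical sizes:
// edge_counts = {2, 0, 3} gives scanned_edges = {0, 2, 2}, edge_frontier_size = 5 and
// result = {0, 0, 2, 2, 2}; vertex 1 contributes no edges, so no output entry maps to it.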
//TODO: Reorganize so that each thread has multiple items to scan at once (check occupancy for this), tuning
__device__ void load_balance_search_block(const int vertex_frontier_size, int *edge_frontier_size, const int *edge_counts, int *scanned_edges, int *result)
{
typedef hipcub::BlockScan<int,BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int total_edges = 0;
//Ensure all threads in the block execute BlockScan and get the value of current_edges
__shared__ int vertex_frontier_rounded;
if(threadIdx.x == 0)
{
vertex_frontier_rounded = get_next_power_of_2(vertex_frontier_size);
if(vertex_frontier_rounded < blockDim.x)
{
vertex_frontier_rounded = blockDim.x; //Must be at least the size of the block for the __syncthreads in the next loop to work correctly
}
}
__syncthreads();
for(int i=threadIdx.x; i<vertex_frontier_rounded; i+=blockDim.x)
{
int local_count[ITEMS_PER_THREAD];
for(int j=0; j<ITEMS_PER_THREAD; j++)
{
if((ITEMS_PER_THREAD*i)+j < vertex_frontier_size)
{
local_count[j] = edge_counts[ITEMS_PER_THREAD*i+j];
}
else
{
local_count[j] = 0;
}
}
int current_edges;
BlockScan(temp_storage).ExclusiveSum(local_count,local_count,current_edges);
__syncthreads(); //Needed for reuse of BlockScan
for(int j=0; j<ITEMS_PER_THREAD; j++)
{
if((ITEMS_PER_THREAD*i)+j < vertex_frontier_size)
{
scanned_edges[ITEMS_PER_THREAD*i+j] = local_count[j] + total_edges;
}
}
total_edges += current_edges;
}
__syncthreads();
if(threadIdx.x == 0)
{
edge_frontier_size[0] = scanned_edges[vertex_frontier_size-1]+edge_counts[vertex_frontier_size-1];
}
__syncthreads();
//LBS work below takes multiple orders of magnitude longer than scanning work above
int ind = 0;
for(int i=threadIdx.x; i<edge_frontier_size[0]; i+=blockDim.x)
{
while(ind < vertex_frontier_size && i >= scanned_edges[ind])
{
ind++;
}
if(ind >= vertex_frontier_size) //boundary condition
{
result[i] = vertex_frontier_size-1;
}
else
{
result[i] = ind-1;
}
}
}
__global__ void extract_edges_warp(int vertex_frontier_size, int *edge_counts, int *scanned_edges, int *result, int *edges)
{
load_balance_search_warp(vertex_frontier_size,edges,edge_counts,scanned_edges,result);
}
__global__ void extract_edges_block(int vertex_frontier_size, int *edge_counts, int *scanned_edges, int *result, int *edges)
{
load_balance_search_block(vertex_frontier_size,edges,edge_counts,scanned_edges,result);
}
| 91987636b9027f8de0117b2241d9818d95d0ae4c.cu | #include "load_balanced_search.cuh"
__device__ int __forceinline__ get_next_power_of_2(int x)
{
if(x < 0)
{
return 0;
}
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x>> 16;
return x+1;
}
//Specialized binary search for the LBS problem: we want to return the greatest index in array whose value is less than the key
__device__ int binary_search(int *array, int array_size, int key, int low = 0)
{
//int low = 0;
int high = array_size-1;
while(low <= high)
{
int mid = low + ((high - low) / 2);
int midVal = array[mid];
if(midVal < key)
{
low = mid + 1;
}
else if(midVal > key)
{
high = mid - 1;
}
else
{
/*while(mid < high)
{
if(array[mid+1] == array[mid])
{
mid++;
}
else
{
break;
}
}*/
return mid; //guarantee O(log n), and use the "normal method" to ensure the right answer is found
}
}
return high; //key not found - return the lower key since we want the greatest index *less* than the key
}
//TODO: Scale to larger sets of edges, extend to blocks, etc.
//More efficient for threads to process consecutive elements in results rather than stride-32 elements?
//Use a warp to extract edges that need to be traversed
//Does the edge frontier size need to be stored to global memory? It can be obtained directly from the scan and edge counts? If it doesn't affect performance much then it's worth keeping I guess.
__device__ void load_balance_search_warp(const int vertex_frontier_size, int *edge_frontier_size, const int *edge_counts, int *scanned_edges, int *result)
{
__shared__ typename cub::WarpScan<int>::TempStorage temp_storage;
int total_edges = 0;
//Ensure all threads in the warp execute WarpScan and get the value of total_edges
int vertex_frontier_rounded = get_next_power_of_2(vertex_frontier_size);
if(vertex_frontier_rounded < WARP_SIZE)
{
vertex_frontier_rounded = WARP_SIZE; //Must be at least the size of the warp for the syncthreads in the next loop to work correctly
}
for(int i=getLaneId(); i<vertex_frontier_rounded; i+=WARP_SIZE)
{
int local_count = i < vertex_frontier_size ? edge_counts[i] : 0;
int current_edges;
cub::WarpScan<int>(temp_storage).ExclusiveSum(local_count,scanned_edges[i],current_edges);
__syncthreads(); //Needed for reuse of WarpScan
if((i != getLaneId()) && (i < vertex_frontier_size))
{
scanned_edges[i] += total_edges; //Add previous number of edges for subsequent loop iterations
}
total_edges += current_edges;
}
if(getLaneId() == 0)
{
edge_frontier_size[0] = scanned_edges[vertex_frontier_size-1]+edge_counts[vertex_frontier_size-1];
}
int ind = 0;
for(int i=getLaneId(); i<total_edges; i+=WARP_SIZE)
{
while(ind < vertex_frontier_size && i >= scanned_edges[ind])
{
ind++;
}
if(ind >= vertex_frontier_size) //boundary condition
{
result[i] = vertex_frontier_size-1;
}
else
{
result[i] = ind-1;
}
}
//This is actually way slower than the naive approach, at least for the inputs I've tested so far. Perhaps that input isn't large enough?
/*for(int i=getLaneId(); i<total_edges; i+=WARP_SIZE)
{
if(i != getLaneId())
{
result[i] = binary_search(scanned_edges,vertex_frontier_size,i,result[i-WARP_SIZE]);
}
else
{
result[i] = binary_search(scanned_edges,vertex_frontier_size,i);
}
}*/
}
//TODO: Reorganize so that each thread has multiple items to scan at once (check occupancy for this), tuning
__device__ void load_balance_search_block(const int vertex_frontier_size, int *edge_frontier_size, const int *edge_counts, int *scanned_edges, int *result)
{
typedef cub::BlockScan<int,BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
int total_edges = 0;
//Ensure all threads in the block execute BlockScan and get the value of current_edges
__shared__ int vertex_frontier_rounded;
if(threadIdx.x == 0)
{
vertex_frontier_rounded = get_next_power_of_2(vertex_frontier_size);
if(vertex_frontier_rounded < blockDim.x)
{
vertex_frontier_rounded = blockDim.x; //Must be at least the size of the block for the __syncthreads in the next loop to work correctly
}
}
__syncthreads();
for(int i=threadIdx.x; i<vertex_frontier_rounded; i+=blockDim.x)
{
int local_count[ITEMS_PER_THREAD];
for(int j=0; j<ITEMS_PER_THREAD; j++)
{
if((ITEMS_PER_THREAD*i)+j < vertex_frontier_size)
{
local_count[j] = edge_counts[ITEMS_PER_THREAD*i+j];
}
else
{
local_count[j] = 0;
}
}
int current_edges;
BlockScan(temp_storage).ExclusiveSum(local_count,local_count,current_edges);
__syncthreads(); //Needed for reuse of BlockScan
for(int j=0; j<ITEMS_PER_THREAD; j++)
{
if((ITEMS_PER_THREAD*i)+j < vertex_frontier_size)
{
scanned_edges[ITEMS_PER_THREAD*i+j] = local_count[j] + total_edges;
}
}
total_edges += current_edges;
}
__syncthreads();
if(threadIdx.x == 0)
{
edge_frontier_size[0] = scanned_edges[vertex_frontier_size-1]+edge_counts[vertex_frontier_size-1];
}
__syncthreads();
//LBS work below takes multiple orders of magnitude longer than scanning work above
int ind = 0;
for(int i=threadIdx.x; i<edge_frontier_size[0]; i+=blockDim.x)
{
while(ind < vertex_frontier_size && i >= scanned_edges[ind])
{
ind++;
}
if(ind >= vertex_frontier_size) //boundary condition
{
result[i] = vertex_frontier_size-1;
}
else
{
result[i] = ind-1;
}
}
}
__global__ void extract_edges_warp(int vertex_frontier_size, int *edge_counts, int *scanned_edges, int *result, int *edges)
{
load_balance_search_warp(vertex_frontier_size,edges,edge_counts,scanned_edges,result);
}
__global__ void extract_edges_block(int vertex_frontier_size, int *edge_counts, int *scanned_edges, int *result, int *edges)
{
load_balance_search_block(vertex_frontier_size,edges,edge_counts,scanned_edges,result);
}
|
a746a3b50215ea1f5ca4778ccc9762522707d8e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e( 1 + x ).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
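// vec_divScalarMany_f divides a stack of sub-images element-wise by a per-sub-image scalar:
// id indexes one element of x, id / sizeSubImage selects the divisor, the quotient is written
// to result and mirrored into resultDouble, and a zero divisor leaves both outputs untouched.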
__global__ void vec_divScalarMany_f (int n, int sizeSubImage,float *result,double *resultDouble, float *x, float *div)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id/sizeSubImage;
if (id < n)
{
if (div[id2]!=0){
result[id] = x[id] / div[id2];
resultDouble[id] =0;
resultDouble[id] =(double)(result[id]);
}
}
} | a746a3b50215ea1f5ca4778ccc9762522707d8e7.cu | #include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e( 1 + x ).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//WARNING : device_sum size should be gridDim.x
__global__ void vec_divScalarMany_f (int n, int sizeSubImage,float *result,double *resultDouble, float *x, float *div)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int id = idy * gridDim.x * blockDim.x + idx;
int id2=id/sizeSubImage;
if (id < n)
{
if (div[id2]!=0){
result[id] = x[id] / div[id2];
resultDouble[id] =0;
resultDouble[id] =(double)(result[id]);
}
}
} |
7504f018cddaeca9b3ba768b15b18c50e3da4ceb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "getIntYArray_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
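// Benchmark harness: for each matrix size in matrices_ (up to the count passed on the command
// line) and each block shape in blocks_, the matrix extents are padded so the grid tiles them
// exactly, the kernel is warmed up 10 times, and the total elapsed time of 1000 timed launches
// is printed in microseconds together with the block and matrix shapes.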
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int2 *d_input = NULL;
hipMalloc(&d_input, XSIZE*YSIZE);
int startPos = 1;
int rLen = 1;
int *d_output = NULL;
hipMalloc(&d_output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
getIntYArray_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_input,startPos,rLen,d_output);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
getIntYArray_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_input,startPos,rLen,d_output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
getIntYArray_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_input,startPos,rLen,d_output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7504f018cddaeca9b3ba768b15b18c50e3da4ceb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "getIntYArray_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int2 *d_input = NULL;
cudaMalloc(&d_input, XSIZE*YSIZE);
int startPos = 1;
int rLen = 1;
int *d_output = NULL;
cudaMalloc(&d_output, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
getIntYArray_kernel<<<gridBlock,threadBlock>>>(d_input,startPos,rLen,d_output);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
getIntYArray_kernel<<<gridBlock,threadBlock>>>(d_input,startPos,rLen,d_output);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
getIntYArray_kernel<<<gridBlock,threadBlock>>>(d_input,startPos,rLen,d_output);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ed44d6ea77e465ea1c718fda8d0beb57a3e6cfc4.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <algorithm>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cusp/complex.h>
#include <cusp/coo_matrix.h>
#include <cusp/multiply.h>
#include <cusp/print.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <math.h>
#define N_THREADS_X 16
#define N_THREADS_Y 16
__global__ void K_CheckNonZerosInCol(float *raw, int rows, int cols, int *nonZeros){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < rows){
for (int i = 0; i < cols; i++){
if (raw[idx * cols + i] > 0){
nonZeros[idx] = 1;
}
}
}
}
__global__ void K_Mean(float* mat_sum_depths, float *out_mean, int rows, int cols, float divide_by){
float sum_depths = 0;
int columIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (columIdx < cols){
for (int i = 0; i < rows; i++){
sum_depths += mat_sum_depths[columIdx + (i * cols)];
}
out_mean[columIdx] = sum_depths / divide_by ;
}
}
__global__ void K_Variance(float* layer, float* mean, int rows, int cols){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < cols) && (idy < rows)){
float meanv = mean[idx];
float value = meanv - layer[idy * cols + idx];
layer[idy * cols + idx] = value * value;
}
}
__global__ void K_StandardDeviation(float* mat_sum_depths, float *out_sd, int rows, int cols, int depths){
float sum_depths = 0;
int columIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (columIdx < cols){
for (int i = 0; i < rows; i++){
sum_depths += mat_sum_depths[columIdx + (i * cols)];
}
out_sd[columIdx] = sqrt(sum_depths / (rows * depths));
}
}
__global__ void AddKernel(float* _mat1, float *_mat2, float *_res,
int rows1, int cols, int cols2){
int idX = blockDim.x * blockIdx.x + threadIdx.x;
int idY = blockDim.y * blockIdx.y + threadIdx.y;
int id = idY * cols2 + idX;
if (id < rows1 * cols2){
_res[id] = 0;
int mat1_row = idY * cols;
for ( int i = 0; i < cols; i++){
int mat2_col = i * cols2 + idX;
_res[id] += _mat1[mat1_row + i] * _mat2[mat2_col];
}
}
}
__global__ void StandardScoreKernel(float* _mat, int rows, int cols,
float* meanSD, float *res){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < cols){
for (int i = 0; i < rows; i++){
int idxElement = i * cols + idx;
float _mean = meanSD[idx];
float _sd = meanSD[cols + idx];
res[idxElement] = (_mat[idxElement] - _mean) / _sd;
}
}
}
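// Layout note: meanSD packs two rows of length cols back to back, means first and standard
// deviations second (meanSD[idx] and meanSD[cols + idx] above), and the kernel writes the
// column-wise z-score (value - mean) / sd for every element of the matrix.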
__global__ void ConfidenceKernel(float *values, float *diagonal, int elements, float *result){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements){
if (diagonal[idx] > 0)
result[idx] = values[idx] / diagonal[idx];
}
}
// START OF THE PROV-KERNELS...
__global__ void sumKernel(float* values1, float* values2, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values1[idx]+values2[idx];
}
}
__global__ void subtractKernel(float* values1, float* values2, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values1[idx]-values2[idx];
}
}
__global__ void binarizeKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
if (values[idx] > 0) {
result[idx] = 1;
} else {
result[idx] = 0;
}
}
}
__global__ void transposeKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values[idx];
}
}
__global__ void invertKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
if (values[idx] > 0) {
result[idx] = 0;
} else {
result[idx] = 1;
}
}
}
__global__ void diagonalizeKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (i==j) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void upperDiagonalKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (j >= i) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void lowerDiagonalKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (i >= j) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void prepareClosureKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
result[i*v + j] = values[i*v + j] > 0 ? 1 : 0;
if (i == j) {
result[i*v + j] = 1;
}
}
}
__global__ void transitiveClosureKernel(float* values, int k, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v) {
if (j < v) {
if (((result[i*v + k] != 0) && (result[k*v + j] != 0))) {
if (i != j) { // ignore the node itself (i == j).
float distIK = (i == k ? 0 : result[i*v + k]);
float distKJ = (k == j ? 0 : result[k*v + j]);
if (result[i*v + j] == 0) { // case where no distance between i and j has been computed yet.
result[i*v + j] = distIK + distKJ;
} else if (distIK + distKJ < result[i*v + j]){ // update if the new distance is smaller than the current one.
result[i*v + j] = distIK + distKJ;
}
}
}
}
}
}
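// One launch of transitiveClosureKernel performs the k-th relaxation step of a Floyd-Warshall
// style pass over the v x v distance matrix prepared by prepareClosureKernel; g_TransitiveClosure
// below launches it for every k in [0, v), accumulating path lengths while leaving the diagonal
// (i == j) untouched.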
__global__ void rasterizeClosureKernel(float* matrix, int v) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (matrix[i*v + j] > 0) {
matrix[i*v + j] = 1 / matrix[i*v + j];
}
}
}
// END OF THE PROV-KERNELS!
extern "C" {
void g_ResetAndSetGPUDevice(int gpuDevice) {
checkCudaErrors(hipSetDevice(gpuDevice));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDevice(gpuDevice));
}
int g_getDeviceCount() {
int nDevices = 0;
if (hipGetDeviceCount(&nDevices) != hipSuccess) {
hipGetLastError();
nDevices = 0;
}
return nDevices;
}
bool g_IsDeviceEnabled() {
return g_getDeviceCount()>0;
}
void g_StandardDeviation(float* mat, int rows, int cols,
float* meanSD, float* result){
float *d_mat;
float *d_meanSD;
float *d_result;
checkCudaErrors(hipMalloc(&d_mat, sizeof(float) * rows * cols));
checkCudaErrors(hipMemcpy(d_mat, mat, sizeof(float) * rows * cols, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_meanSD, sizeof(float) * 2 * cols));
checkCudaErrors(hipMemcpy(d_meanSD, meanSD, sizeof(float) * 2 * cols, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * rows * cols));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1, 1);
dim3 gridDim(ceil((float)cols/(N_THREADS_X * N_THREADS_Y)), 1, 1);
hipLaunchKernelGGL(( StandardScoreKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_mat, rows, cols, d_meanSD, d_result);
checkCudaErrors(hipMemcpy(result, d_result, sizeof(float) * rows * cols, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_mat));
checkCudaErrors(hipFree(d_meanSD));
checkCudaErrors(hipFree(d_result));
}
void g_MeanSD(int rows, int cols, int depth, float *h_data, float *result, bool considerZeros){
float *h_layer_keys;
checkCudaErrors(hipMalloc(&h_layer_keys, sizeof(float) * rows * cols));
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
h_layer_keys[i * cols + j] = j;
}
}
float *d_raw, *d_val_res, *d_mean_sd, *d_sum_depths;
int *d_kraw, *d_keys_res, *d_nonZeros;
checkCudaErrors(hipMalloc((void**) &d_raw, sizeof(float) * rows * cols));
checkCudaErrors(hipMalloc((void**) &d_kraw, sizeof(int) * rows * cols));
checkCudaErrors(hipMalloc((void**) &d_val_res, sizeof(float) * rows * cols));
checkCudaErrors(hipMalloc((void**) &d_keys_res, sizeof(int) * rows * cols));
checkCudaErrors(hipMalloc((void**) &d_mean_sd, sizeof(float) * cols * 2));
checkCudaErrors(hipMalloc((void**) &d_sum_depths, sizeof(float) * depth * cols));
checkCudaErrors(hipMemset(d_val_res, 0, sizeof(float) * rows * cols));
checkCudaErrors(hipMemset(d_keys_res, 0, sizeof(int) * rows * cols));
if (!considerZeros){
checkCudaErrors(hipMalloc((void**) &d_nonZeros, sizeof(int) * rows));
checkCudaErrors(hipMemset(d_nonZeros, 0, sizeof(int) * rows));
}
thrust::device_ptr<float> dev_ptr(d_raw);
thrust::device_ptr<int> dev_ptr_k(d_kraw);
thrust::device_ptr<int> dev_ptr_k_res(d_keys_res);
thrust::device_ptr<float> dev_ptr_v_res(d_val_res);
for (int i = 0; i < depth; i++){
checkCudaErrors(hipMemcpy(d_raw, &h_data[i * rows * cols],
sizeof(float) * rows * cols, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_kraw, h_layer_keys,
sizeof(int) * rows * cols, hipMemcpyHostToDevice));
if (!considerZeros){
dim3 blockDim_nz(256, 1, 1);
dim3 gridDim_nz( ceil((float)rows/256), 1, 1);
hipLaunchKernelGGL(( K_CheckNonZerosInCol), dim3(gridDim_nz), dim3(blockDim_nz), 0, 0, d_raw, rows, cols, d_nonZeros);
}
thrust::sort_by_key(dev_ptr_k, dev_ptr_k + (rows * cols), dev_ptr);
checkCudaErrors(hipDeviceSynchronize());
thrust::reduce_by_key(dev_ptr_k, dev_ptr_k+(rows * cols), dev_ptr, dev_ptr_k_res, dev_ptr_v_res);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&d_sum_depths[i * cols], d_val_res, sizeof(float) * cols, hipMemcpyDeviceToDevice));
}
float *_result;
checkCudaErrors(hipMalloc(&_result, sizeof(float) * cols * depth));
checkCudaErrors(hipMemcpy(_result, d_mean_sd, sizeof(float) * cols * depth, hipMemcpyDeviceToHost));
// Calculate the mean
int divide_by = rows * depth;
if (!considerZeros){
thrust::device_ptr<int> dev_ptr_nonZeros(d_nonZeros);
divide_by = thrust::reduce(dev_ptr_nonZeros, dev_ptr_nonZeros + rows) * depth;
}
checkCudaErrors(hipDeviceSynchronize());
dim3 blockDim_m(256, 1, 1);
dim3 gridDim_m( ceil((float)cols/256), 1, 1);
hipLaunchKernelGGL(( K_Mean), dim3(gridDim_m), dim3(blockDim_m), 0, 0, d_sum_depths, d_mean_sd, rows, cols, (float)divide_by);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(result, d_mean_sd, sizeof(float) * cols * 2, hipMemcpyDeviceToHost));
if (!considerZeros){
hipFree(d_nonZeros);
}
hipFree(d_sum_depths);
hipFree(d_mean_sd);
hipFree(d_raw);
hipFree(d_kraw);
hipFree(d_val_res);
hipFree(d_keys_res);
hipFree(_result);
hipFree(h_layer_keys);
}
void g_Confidence(float* values, float* diagonal, int elements, float* result){
float *d_values;
float *d_diagonal;
float *d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(hipMemcpy(d_values, values, sizeof(float) * elements, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_diagonal, sizeof(float) * elements));
checkCudaErrors(hipMemcpy(d_diagonal, diagonal, sizeof(float) * elements, hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1, 1);
dim3 gridDim(ceil((float) elements/(N_THREADS_X * N_THREADS_Y)), 1, 1);
hipLaunchKernelGGL(( ConfidenceKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, d_diagonal, elements, d_result);
checkCudaErrors(hipMemcpy(result, d_result, sizeof(float) * elements, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_diagonal));
checkCudaErrors(hipFree(d_result));
}
void g_Sum(float* values1, float* values2, int elements, float* result) {
float* d_values1;
float* d_values2;
float* d_result;
checkCudaErrors(hipMalloc(&d_values1, sizeof(float) * elements));
checkCudaErrors(hipMalloc(&d_values2, sizeof(float) * elements));
checkCudaErrors(hipMemcpy(d_values1, values1, sizeof(float) * elements,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_values2, values2, sizeof(float) * elements,hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_X)), 1, 1);
hipLaunchKernelGGL(( sumKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values1, d_values2, elements, d_result);
checkCudaErrors(hipMemcpy(result, d_result, sizeof(float) * elements, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values1));
checkCudaErrors(hipFree(d_values2));
checkCudaErrors(hipFree(d_result));
}
void g_Subtract(float* values1, float* values2, int elements, float* result) {
float* d_values1;
float* d_values2;
float* d_result;
checkCudaErrors(hipMalloc(&d_values1, sizeof(float) * elements));
checkCudaErrors(hipMalloc(&d_values2, sizeof(float) * elements));
checkCudaErrors(hipMemcpy(d_values1, values1, sizeof(float) * elements,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_values2, values2, sizeof(float) * elements,hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_X)), 1, 1);
hipLaunchKernelGGL(( subtractKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values1, d_values2, elements, d_result);
checkCudaErrors(hipMemcpy(result, d_result, sizeof(float) * elements, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values1));
checkCudaErrors(hipFree(d_values2));
checkCudaErrors(hipFree(d_result));
}
void g_Binarize(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * elements,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_X)), 1, 1);
hipLaunchKernelGGL(( binarizeKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, elements, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * elements,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_Transpose(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * elements,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_X)), 1, 1);
hipLaunchKernelGGL(( transposeKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, elements, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * elements,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_Invert(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * elements,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_X)), 1, 1);
hipLaunchKernelGGL(( invertKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, elements, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * elements,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_Diagonalize(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * v*v,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
hipLaunchKernelGGL(( diagonalizeKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, v, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * v*v,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_UpperDiagonal(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * v*v,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
hipLaunchKernelGGL(( upperDiagonalKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, v, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * v*v,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_LowerDiagonal(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * v*v,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
hipLaunchKernelGGL(( lowerDiagonalKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, v, d_result);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * v*v,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_TransitiveClosure(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(hipMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
hipMemcpy(d_values, values, sizeof(float) * v*v,
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(hipMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X, N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
hipLaunchKernelGGL(( prepareClosureKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, v, d_result);
for (int k=0; k < v; k++) {
hipLaunchKernelGGL(( transitiveClosureKernel), dim3(gridDim), dim3(blockDim), 0, 0, d_values, k, v, d_result);
}
//rasterizeClosureKernel<<<gridDim, blockDim>>>(d_result, v);
checkCudaErrors(
hipMemcpy(result, d_result, sizeof(float) * v*v,
hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_values));
checkCudaErrors(hipFree(d_result));
}
void g_MatMul(int n_rowsA, int n_colsA, int n_colsB, int nzA, int nzB,
int *rowsA, int *colsA, float *valuesA,
int *rowsB, int *colsB, float *valuesB,
int **row_res, int **col_res, float **value_res,
int& res_nz){
cusp::coo_matrix<int,float,cusp::host_memory> matA(n_rowsA,n_colsA,nzA);
for (int i = 0; i < nzA; i++){
matA.row_indices[i] = rowsA[i]; matA.column_indices[i] = colsA[i]; matA.values[i] = valuesA[i];
}
cusp::coo_matrix<int,float,cusp::device_memory> matA_d = matA;
cusp::coo_matrix<int,float,cusp::host_memory> matB(n_colsA,n_colsB,nzB);
for (int i = 0; i < nzB; i++){
matB.row_indices[i] = rowsB[i]; matB.column_indices[i] = colsB[i]; matB.values[i] = valuesB[i];
}
cusp::coo_matrix<int,float,cusp::device_memory> matB_d = matB;
cusp::coo_matrix<int,float,cusp::device_memory> matRes_d(n_rowsA,n_colsB, n_rowsA * n_colsB);
cusp::multiply(matA_d, matB_d, matRes_d);
cusp::coo_matrix<int,float,cusp::host_memory> matRes = matRes_d;
res_nz = matRes.num_entries;
int *_row_res = new int[res_nz];
int *_col_res = new int[res_nz];
float *_value_res = new float[res_nz];
for(size_t n = 0; n < res_nz; n++)
{
_row_res[n] = matRes.row_indices[n];
_col_res[n] = matRes.column_indices[n];
_value_res[n] = matRes.values[n];
}
*row_res = _row_res;
*col_res = _col_res;
*value_res = _value_res;
}
}
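// A minimal host-side usage sketch, not taken from the original project: it
// assumes a tiny 3-vertex graph and shows how g_TransitiveClosure is expected
// to be called. The function name demoTransitiveClosure and the literal
// adjacency matrix are illustrative only.
static void demoTransitiveClosure() {
	const int v = 3;
	float adj[v * v] = {
		0, 1, 0,   // edge 0 -> 1
		0, 0, 1,   // edge 1 -> 2
		0, 0, 0 };
	float closure[v * v] = {0};
	g_TransitiveClosure(adj, v, closure);
	// closure[0 * v + 2] now holds the path length of 0 -> 1 -> 2, i.e. 2,
	// since the rasterize step inside g_TransitiveClosure is commented out.
}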
| ed44d6ea77e465ea1c718fda8d0beb57a3e6cfc4.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <algorithm>
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cusp/complex.h>
#include <cusp/coo_matrix.h>
#include <cusp/multiply.h>
#include <cusp/print.h>
#include <helper_cuda.h>
#include <stdio.h>
#include <math.h>
#define N_THREADS_X 16
#define N_THREADS_Y 16
__global__ void K_CheckNonZerosInCol(float *raw, int rows, int cols, int *nonZeros){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < rows){
for (int i = 0; i < cols; i++){
if (raw[idx * cols + i] > 0){
nonZeros[idx] = 1;
}
}
}
}
__global__ void K_Mean(float* mat_sum_depths, float *out_mean, int rows, int cols, float divide_by){
float sum_depths = 0;
int columIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (columIdx < cols){
for (int i = 0; i < rows; i++){
sum_depths += mat_sum_depths[columIdx + (i * cols)];
}
out_mean[columIdx] = sum_depths / divide_by ;
}
}
__global__ void K_Variance(float* layer, float* mean, int rows, int cols){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if ((idx < cols) && (idy < rows)){
float meanv = mean[idx];
float value = meanv - layer[idy * cols + idx];
layer[idy * cols + idx] = value * value;
}
}
__global__ void K_StandardDeviation(float* mat_sum_depths, float *out_sd, int rows, int cols, int depths){
float sum_depths = 0;
int columIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (columIdx < cols){
for (int i = 0; i < rows; i++){
sum_depths += mat_sum_depths[columIdx + (i * cols)];
}
out_sd[columIdx] = sqrt(sum_depths / (rows * depths));
}
}
__global__ void AddKernel(float* _mat1, float *_mat2, float *_res,
int rows1, int cols, int cols2){
int idX = blockDim.x * blockIdx.x + threadIdx.x;
int idY = blockDim.y * blockIdx.y + threadIdx.y;
int id = idY * cols2 + idX;
if (id < rows1 * cols2){
_res[id] = 0;
int mat1_row = idY * cols;
for ( int i = 0; i < cols; i++){
int mat2_col = i * cols2 + idX;
_res[id] += _mat1[mat1_row + i] * _mat2[mat2_col];
}
}
}
__global__ void StandardScoreKernel(float* _mat, int rows, int cols,
float* meanSD, float *res){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < cols){
for (int i = 0; i < rows; i++){
int idxElement = i * cols + idx;
float _mean = meanSD[idx];
float _sd = meanSD[cols + idx];
res[idxElement] = (_mat[idxElement] - _mean) / _sd;
}
}
}
__global__ void ConfidenceKernel(float *values, float *diagonal, int elements, float *result){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements){
if (diagonal[idx] > 0)
result[idx] = values[idx] / diagonal[idx];
}
}
// START OF THE PROV-KERNELS...
__global__ void sumKernel(float* values1, float* values2, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values1[idx]+values2[idx];
}
}
__global__ void subtractKernel(float* values1, float* values2, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values1[idx]-values2[idx];
}
}
__global__ void binarizeKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
if (values[idx] > 0) {
result[idx] = 1;
} else {
result[idx] = 0;
}
}
}
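// Note: transposeKernel below copies values element-wise. With only the flat
// element count available (no row/column dimensions), no index permutation is
// actually performed.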
__global__ void transposeKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
result[idx] = values[idx];
}
}
__global__ void invertKernel(float* values, int elements, float* result) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < elements) {
if (values[idx] > 0) {
result[idx] = 0;
} else {
result[idx] = 1;
}
}
}
__global__ void diagonalizeKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (i==j) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void upperDiagonalKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (j >= i) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void lowerDiagonalKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (i >= j) {
result[i*v + j] = values[i*v + j];
} else {
result[i*v + j] = 0;
}
}
}
__global__ void prepareClosureKernel(float* values, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
result[i*v + j] = values[i*v + j] > 0 ? 1 : 0;
if (i == j) {
result[i*v + j] = 1;
}
}
}
__global__ void transitiveClosureKernel(float* values, int k, int v, float* result) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v) {
if (j < v) {
if (((result[i*v + k] != 0) && (result[k*v + j] != 0))) {
				if (i != j) { // skip the node itself (i == j).
float distIK = (i == k ? 0 : result[i*v + k]);
float distKJ = (k == j ? 0 : result[k*v + j]);
					if (result[i*v + j] == 0) { // case where the distance between I and J has not been computed yet.
result[i*v + j] = distIK + distKJ;
					} else if (distIK + distKJ < result[i*v + j]){ // update if the new distance is smaller than the current one.
result[i*v + j] = distIK + distKJ;
}
}
}
}
}
}
__global__ void rasterizeClosureKernel(float* matrix, int v) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
if (i < v && j < v) {
if (matrix[i*v + j] > 0) {
matrix[i*v + j] = 1 / matrix[i*v + j];
}
}
}
// END OF THE PROV-KERNELS!
extern "C" {
void g_ResetAndSetGPUDevice(int gpuDevice) {
checkCudaErrors(cudaSetDevice(gpuDevice));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDevice(gpuDevice));
}
int g_getDeviceCount() {
int nDevices = 0;
if (cudaGetDeviceCount(&nDevices) != cudaSuccess) {
cudaGetLastError();
nDevices = 0;
}
return nDevices;
}
bool g_IsDeviceEnabled() {
return g_getDeviceCount()>0;
}
void g_StandardDeviation(float* mat, int rows, int cols,
float* meanSD, float* result){
float *d_mat;
float *d_meanSD;
float *d_result;
checkCudaErrors(cudaMalloc(&d_mat, sizeof(float) * rows * cols));
checkCudaErrors(cudaMemcpy(d_mat, mat, sizeof(float) * rows * cols, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_meanSD, sizeof(float) * 2 * cols));
checkCudaErrors(cudaMemcpy(d_meanSD, meanSD, sizeof(float) * 2 * cols, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * rows * cols));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1, 1);
dim3 gridDim(ceil((float)cols/(N_THREADS_X * N_THREADS_Y)), 1, 1);
StandardScoreKernel<<<gridDim, blockDim>>>(d_mat, rows, cols, d_meanSD, d_result);
checkCudaErrors(cudaMemcpy(result, d_result, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_mat));
checkCudaErrors(cudaFree(d_meanSD));
checkCudaErrors(cudaFree(d_result));
}
void g_MeanSD(int rows, int cols, int depth, float *h_data, float *result, bool considerZeros){
float *h_layer_keys;
	// h_layer_keys is filled element-by-element on the host below, so it must
	// be host memory, not a device allocation.
	h_layer_keys = (float *) malloc(sizeof(float) * rows * cols);
for (int i = 0; i < rows; i++){
for (int j = 0; j < cols; j++){
h_layer_keys[i * cols + j] = j;
}
}
float *d_raw, *d_val_res, *d_mean_sd, *d_sum_depths;
int *d_kraw, *d_keys_res, *d_nonZeros;
checkCudaErrors(cudaMalloc((void**) &d_raw, sizeof(float) * rows * cols));
checkCudaErrors(cudaMalloc((void**) &d_kraw, sizeof(int) * rows * cols));
checkCudaErrors(cudaMalloc((void**) &d_val_res, sizeof(float) * rows * cols));
checkCudaErrors(cudaMalloc((void**) &d_keys_res, sizeof(int) * rows * cols));
checkCudaErrors(cudaMalloc((void**) &d_mean_sd, sizeof(float) * cols * 2));
checkCudaErrors(cudaMalloc((void**) &d_sum_depths, sizeof(float) * depth * cols));
checkCudaErrors(cudaMemset(d_val_res, 0, sizeof(float) * rows * cols));
checkCudaErrors(cudaMemset(d_keys_res, 0, sizeof(int) * rows * cols));
if (!considerZeros){
checkCudaErrors(cudaMalloc((void**) &d_nonZeros, sizeof(int) * rows));
checkCudaErrors(cudaMemset(d_nonZeros, 0, sizeof(int) * rows));
}
thrust::device_ptr<float> dev_ptr(d_raw);
thrust::device_ptr<int> dev_ptr_k(d_kraw);
thrust::device_ptr<int> dev_ptr_k_res(d_keys_res);
thrust::device_ptr<float> dev_ptr_v_res(d_val_res);
for (int i = 0; i < depth; i++){
checkCudaErrors(cudaMemcpy(d_raw, &h_data[i * rows * cols],
sizeof(float) * rows * cols, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_kraw, h_layer_keys,
sizeof(int) * rows * cols, cudaMemcpyHostToDevice));
if (!considerZeros){
dim3 blockDim_nz(256, 1, 1);
dim3 gridDim_nz( ceil((float)rows/256), 1, 1);
K_CheckNonZerosInCol<<<gridDim_nz, blockDim_nz>>>(d_raw, rows, cols, d_nonZeros);
}
thrust::sort_by_key(dev_ptr_k, dev_ptr_k + (rows * cols), dev_ptr);
checkCudaErrors(cudaDeviceSynchronize());
thrust::reduce_by_key(dev_ptr_k, dev_ptr_k+(rows * cols), dev_ptr, dev_ptr_k_res, dev_ptr_v_res);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&d_sum_depths[i * cols], d_val_res, sizeof(float) * cols, cudaMemcpyDeviceToDevice));
}
float *_result;
checkCudaErrors(cudaMalloc(&_result, sizeof(float) * cols * depth));
checkCudaErrors(cudaMemcpy(_result, d_mean_sd, sizeof(float) * cols * depth, cudaMemcpyDeviceToHost));
// Calculate the mean
int divide_by = rows * depth;
if (!considerZeros){
thrust::device_ptr<int> dev_ptr_nonZeros(d_nonZeros);
divide_by = thrust::reduce(dev_ptr_nonZeros, dev_ptr_nonZeros + rows) * depth;
}
checkCudaErrors(cudaDeviceSynchronize());
dim3 blockDim_m(256, 1, 1);
dim3 gridDim_m( ceil((float)cols/256), 1, 1);
K_Mean<<<gridDim_m, blockDim_m>>>(d_sum_depths, d_mean_sd, rows, cols, (float)divide_by);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(result, d_mean_sd, sizeof(float) * cols * 2, cudaMemcpyDeviceToHost));
if (!considerZeros){
cudaFree(d_nonZeros);
}
cudaFree(d_sum_depths);
cudaFree(d_mean_sd);
cudaFree(d_raw);
cudaFree(d_kraw);
cudaFree(d_val_res);
cudaFree(d_keys_res);
cudaFree(_result);
	free(h_layer_keys);
}
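// A minimal sketch of the sort/reduce-by-key idiom used by g_MeanSD above.
// reduce_by_key expects equal keys to be stored consecutively, which is why the
// data is sorted by column key first. The name demoColumnSum and the sample
// values are illustrative only and not part of the original pipeline.
static void demoColumnSum() {
	thrust::device_vector<int>   key(4);
	thrust::device_vector<float> val(4);
	key[0] = 0; key[1] = 0; key[2] = 1; key[3] = 1;   // already grouped by key
	val[0] = 1; val[1] = 2; val[2] = 3; val[3] = 4;
	thrust::device_vector<int>   keyOut(2);
	thrust::device_vector<float> sumOut(2);
	thrust::reduce_by_key(key.begin(), key.end(), val.begin(),
	                      keyOut.begin(), sumOut.begin());
	// sumOut now holds {3, 7}: one partial sum per distinct key (column).
}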
void g_Confidence(float* values, float* diagonal, int elements, float* result){
float *d_values;
float *d_diagonal;
float *d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(cudaMemcpy(d_values, values, sizeof(float) * elements, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_diagonal, sizeof(float) * elements));
checkCudaErrors(cudaMemcpy(d_diagonal, diagonal, sizeof(float) * elements, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1, 1);
dim3 gridDim(ceil((float) elements/(N_THREADS_X * N_THREADS_Y)), 1, 1);
ConfidenceKernel<<<gridDim, blockDim>>>(d_values, d_diagonal, elements, d_result);
checkCudaErrors(cudaMemcpy(result, d_result, sizeof(float) * elements, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_diagonal));
checkCudaErrors(cudaFree(d_result));
}
void g_Sum(float* values1, float* values2, int elements, float* result) {
float* d_values1;
float* d_values2;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values1, sizeof(float) * elements));
checkCudaErrors(cudaMalloc(&d_values2, sizeof(float) * elements));
checkCudaErrors(cudaMemcpy(d_values1, values1, sizeof(float) * elements,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_values2, values2, sizeof(float) * elements,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
	dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_Y)), 1, 1);
sumKernel<<<gridDim, blockDim>>>(d_values1, d_values2, elements, d_result);
checkCudaErrors(cudaMemcpy(result, d_result, sizeof(float) * elements, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values1));
checkCudaErrors(cudaFree(d_values2));
checkCudaErrors(cudaFree(d_result));
}
void g_Subtract(float* values1, float* values2, int elements, float* result) {
float* d_values1;
float* d_values2;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values1, sizeof(float) * elements));
checkCudaErrors(cudaMalloc(&d_values2, sizeof(float) * elements));
checkCudaErrors(cudaMemcpy(d_values1, values1, sizeof(float) * elements,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_values2, values2, sizeof(float) * elements,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
	dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_Y)), 1, 1);
subtractKernel<<<gridDim, blockDim>>>(d_values1, d_values2, elements, d_result);
checkCudaErrors(cudaMemcpy(result, d_result, sizeof(float) * elements, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values1));
checkCudaErrors(cudaFree(d_values2));
checkCudaErrors(cudaFree(d_result));
}
void g_Binarize(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * elements,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
	dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_Y)), 1, 1);
binarizeKernel<<<gridDim, blockDim>>>(d_values, elements, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * elements,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_Transpose(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * elements,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
	dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_Y)), 1, 1);
transposeKernel<<<gridDim, blockDim>>>(d_values, elements, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * elements,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_Invert(float* values, int elements, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * elements));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * elements,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * elements));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * elements));
dim3 blockDim(N_THREADS_X * N_THREADS_Y, 1);
	dim3 gridDim(ceil((float) elements / (N_THREADS_X * N_THREADS_Y)), 1, 1);
invertKernel<<<gridDim, blockDim>>>(d_values, elements, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * elements,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_Diagonalize(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * v*v,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
diagonalizeKernel<<<gridDim, blockDim>>>(d_values, v, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * v*v,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_UpperDiagonal(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * v*v,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
upperDiagonalKernel<<<gridDim, blockDim>>>(d_values, v, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * v*v,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_LowerDiagonal(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * v*v,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X , N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
lowerDiagonalKernel<<<gridDim, blockDim>>>(d_values, v, d_result);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * v*v,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_TransitiveClosure(float* values, int v, float* result) {
float* d_values;
float* d_result;
checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * v*v));
checkCudaErrors(
cudaMemcpy(d_values, values, sizeof(float) * v*v,
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc(&d_result, sizeof(float) * v*v));
checkCudaErrors(cudaMemset(d_result, 0, sizeof(float) * v*v));
dim3 blockDim(N_THREADS_X, N_THREADS_Y, 1);
dim3 gridDim(ceil((float) v / (N_THREADS_X)), ceil((float) v / (N_THREADS_Y)), 1);
prepareClosureKernel<<<gridDim, blockDim>>>(d_values, v, d_result);
for (int k=0; k < v; k++) {
transitiveClosureKernel<<<gridDim, blockDim>>>(d_values, k, v, d_result);
}
//rasterizeClosureKernel<<<gridDim, blockDim>>>(d_result, v);
checkCudaErrors(
cudaMemcpy(result, d_result, sizeof(float) * v*v,
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_values));
checkCudaErrors(cudaFree(d_result));
}
void g_MatMul(int n_rowsA, int n_colsA, int n_colsB, int nzA, int nzB,
int *rowsA, int *colsA, float *valuesA,
int *rowsB, int *colsB, float *valuesB,
int **row_res, int **col_res, float **value_res,
int& res_nz){
cusp::coo_matrix<int,float,cusp::host_memory> matA(n_rowsA,n_colsA,nzA);
for (int i = 0; i < nzA; i++){
matA.row_indices[i] = rowsA[i]; matA.column_indices[i] = colsA[i]; matA.values[i] = valuesA[i];
}
cusp::coo_matrix<int,float,cusp::device_memory> matA_d = matA;
cusp::coo_matrix<int,float,cusp::host_memory> matB(n_colsA,n_colsB,nzB);
for (int i = 0; i < nzB; i++){
matB.row_indices[i] = rowsB[i]; matB.column_indices[i] = colsB[i]; matB.values[i] = valuesB[i];
}
cusp::coo_matrix<int,float,cusp::device_memory> matB_d = matB;
cusp::coo_matrix<int,float,cusp::device_memory> matRes_d(n_rowsA,n_colsB, n_rowsA * n_colsB);
cusp::multiply(matA_d, matB_d, matRes_d);
cusp::coo_matrix<int,float,cusp::host_memory> matRes = matRes_d;
res_nz = matRes.num_entries;
int *_row_res = new int[res_nz];
int *_col_res = new int[res_nz];
float *_value_res = new float[res_nz];
for(size_t n = 0; n < res_nz; n++)
{
_row_res[n] = matRes.row_indices[n];
_col_res[n] = matRes.column_indices[n];
_value_res[n] = matRes.values[n];
}
*row_res = _row_res;
*col_res = _col_res;
*value_res = _value_res;
}
}
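// A minimal usage sketch, not taken from the original project: it multiplies a
// 2x2 sparse identity matrix by itself to show how g_MatMul is called and that
// the caller owns the three result arrays (they are allocated with new[]
// inside g_MatMul). The name demoMatMul and the literal values are
// illustrative only.
static void demoMatMul() {
	int rows[2] = {0, 1};
	int cols[2] = {0, 1};
	float vals[2] = {1.0f, 1.0f};
	int *rowRes = 0, *colRes = 0;
	float *valRes = 0;
	int nzRes = 0;
	g_MatMul(2, 2, 2, 2, 2,
	         rows, cols, vals,
	         rows, cols, vals,
	         &rowRes, &colRes, &valRes, nzRes);
	// ... consume the COO triplets (rowRes, colRes, valRes) ...
	delete [] rowRes;
	delete [] colRes;
	delete [] valRes;
}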
|
913d0987def3d2b96c7386575ca5df3aa471aaa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// CUDA code to compute minimum distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define MAX_POINTS 1048576
#define BLOCK_SIZE 1024
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
//
// Output:
// D: D[0] = minimum distance
//
__global__ void minimum_distance(float * X, float * Y, float * D, float * Glob,
int n) {
unsigned int i = ((blockIdx.x * blockDim.x) + threadIdx.x);
int j = 0;
if (i < n) {
		// Complete first step: seed D[i] with the distance to the next point.
		// The last point (i == n - 1) has no successor, so give it a large
		// sentinel instead of reading past the end of X and Y.
		float xFirst = X[i], yFirst = Y[i];
		float xComp = X[i + 1 < n ? i + 1 : i], yComp = Y[i + 1 < n ? i + 1 : i];
		float sqX = (xComp - xFirst);
		float sqY = (yComp - yFirst);
		D[i] = (i + 1 < n) ? sqrtf(sqX * sqX + sqY * sqY) : 3.402823466e+38f; // FLT_MAX for the last point
for (j = i + 1; j < n; j++) {
xComp = X[j];
yComp = Y[j];
float sqX = (xComp - xFirst);
float sqY = (yComp - yFirst);
float distance = sqrtf(sqX * sqX + sqY * sqY);
if (distance < D[i])
D[i] = distance;
}
}
__syncthreads();
if (i < blockDim.x && i < n) {
float sdata;
int tid = threadIdx.x;
int shift = n;
if (blockDim.x < n) {
shift = n / blockDim.x;
sdata = D[tid * shift];
for (j = 1; j < shift; j++) {
if (sdata > D[(tid * shift) + j])
sdata = D[(tid * shift) + j];
}
shift = blockDim.x;
D[tid] = sdata;
}
__syncthreads();
for (unsigned int s = shift / 2; s > 0; s >>= 1) {
if (tid < s) {
if (D[tid] > D[tid + s]) {
D[tid] = D[tid + s];
}
}
__syncthreads();
}
if (i == 0)
*Glob = D[0];
}
}
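// Reduction walk-through with illustrative numbers (not from the original
// comments): with n = 1024 and blockDim.x = 256, each of the first 256 threads
// first folds shift = n / 256 = 4 consecutive entries of D into D[tid]; the
// tree loop then halves the active range (128, 64, ..., 1) so the global
// minimum ends up in D[0], which thread 0 copies to *Glob.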
// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
int main(int argc, char* argv[]) {
// Host Data
float * hVx; // host x-coordinate array
float * hVy; // host y-coordinate array
float * hmin_dist; // minimum value on host
// Device Data
float * dVx; // device x-coordinate array
float * dVy; // device x-coordinate array
float * dmin_dist; // minimum value on device
float * dVd;
int i, j, size, num_points, threads, blocks;
float dx, dy, Dij, distance;
unsigned int seed = 0;
hipEvent_t start, stop; // GPU timing variables
struct timeval cpu_start, cpu_stop; // CPU timing variables
float time_array[10];
// Timing initializations
hipEventCreate(&start);
hipEventCreate(&stop);
// Check input
if (argc != 2) {
printf("Use: %s <number of points>\n", argv[0]);
exit(0);
}
if ((num_points = atoi(argv[argc - 1])) > MAX_POINTS) {
printf("Maximum number of points allowed: %d\n", MAX_POINTS);
exit(0);
}
// Allocate host coordinate arrays
size = num_points * sizeof(float);
hVx = (float *) malloc(size);
hVy = (float *) malloc(size);
hmin_dist = (float *) malloc(sizeof(float));
// Initialize points
for (i = 0; i < num_points; i++) {
hVx[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
hVy[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
}
// Allocate device coordinate arrays
hipMalloc(&dVx, size);
hipMalloc(&dVy, size);
hipMalloc(&dmin_dist, sizeof(float));
hipMalloc(&dVd, size);
// Copy coordinate arrays from host memory to device memory
hipEventRecord(start, 0);
hipMemcpy(dVx, hVx, size, hipMemcpyHostToDevice);
hipMemcpy(dVy, hVy, size, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&(time_array[0]), start, stop);
// Invoke kernel
hipEventRecord(start, 0);
threads = 256;
blocks = 1;
if (num_points > threads) {
		blocks = (num_points + threads - 1) / threads; // round up so no point is left without a thread
}
hipLaunchKernelGGL(( minimum_distance), dim3(blocks),dim3(threads), 0, 0, dVx, dVy, dVd, dmin_dist, num_points);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&(time_array[1]), start, stop);
// Copy result from device memory to host memory
hipEventRecord(start, 0);
hipMemcpy(hmin_dist, dmin_dist, sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&(time_array[2]), start, stop);
printf("Number of Points = %d\n", num_points);
printf("GPU Host-to-device = %f ms \n", time_array[0]);
printf("GPU execution time = %f ms \n", time_array[1]);
printf("GPU Device-to-host = %f ms \n", time_array[2]);
printf("Minimum distance (GPU) = %e\n", hmin_dist[0]);
// Compute minimum distance on host to check device computation
gettimeofday(&cpu_start, NULL);
dx = hVx[1] - hVx[0];
dy = hVy[1] - hVy[0];
distance = sqrtf(dx * dx + dy * dy);
for (i = 0; i < num_points; i++) {
for (j = i + 1; j < num_points; j++) {
dx = hVx[j] - hVx[i];
dy = hVy[j] - hVy[i];
Dij = sqrtf(dx * dx + dy * dy);
if (distance > Dij)
distance = Dij;
}
}
gettimeofday(&cpu_stop, NULL);
time_array[3] = 1000 * (cpu_stop.tv_sec - cpu_start.tv_sec)
+ 0.000001 * (cpu_stop.tv_usec - cpu_start.tv_usec);
printf("CPU execution time = %f ms\n", time_array[3]);
printf("Minimum distance (CPU) = %e\n", distance);
// Free device memory
hipFree(dVx);
hipFree(dVy);
hipFree(dmin_dist);
hipFree(dVd);
// Free host memory
free(hVx);
free(hVy);
free(hmin_dist);
return 0;
}
| 913d0987def3d2b96c7386575ca5df3aa471aaa4.cu | //
// CUDA code to compute minimum distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define MAX_POINTS 1048576
#define BLOCK_SIZE 1024
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
//
// Output:
// D: D[0] = minimum distance
//
__global__ void minimum_distance(float * X, float * Y, float * D, float * Glob,
int n) {
unsigned int i = ((blockIdx.x * blockDim.x) + threadIdx.x);
int j = 0;
if (i < n) {
		// Complete first step: seed D[i] with the distance to the next point.
		// The last point (i == n - 1) has no successor, so give it a large
		// sentinel instead of reading past the end of X and Y.
		float xFirst = X[i], yFirst = Y[i];
		float xComp = X[i + 1 < n ? i + 1 : i], yComp = Y[i + 1 < n ? i + 1 : i];
		float sqX = (xComp - xFirst);
		float sqY = (yComp - yFirst);
		D[i] = (i + 1 < n) ? sqrtf(sqX * sqX + sqY * sqY) : 3.402823466e+38f; // FLT_MAX for the last point
for (j = i + 1; j < n; j++) {
xComp = X[j];
yComp = Y[j];
float sqX = (xComp - xFirst);
float sqY = (yComp - yFirst);
float distance = sqrtf(sqX * sqX + sqY * sqY);
if (distance < D[i])
D[i] = distance;
}
}
__syncthreads();
if (i < blockDim.x && i < n) {
float sdata;
int tid = threadIdx.x;
int shift = n;
if (blockDim.x < n) {
shift = n / blockDim.x;
sdata = D[tid * shift];
for (j = 1; j < shift; j++) {
if (sdata > D[(tid * shift) + j])
sdata = D[(tid * shift) + j];
}
shift = blockDim.x;
D[tid] = sdata;
}
__syncthreads();
for (unsigned int s = shift / 2; s > 0; s >>= 1) {
if (tid < s) {
if (D[tid] > D[tid + s]) {
D[tid] = D[tid + s];
}
}
__syncthreads();
}
if (i == 0)
*Glob = D[0];
}
}
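// Reduction walk-through with illustrative numbers (not from the original
// comments): with n = 1024 and blockDim.x = 256, each of the first 256 threads
// first folds shift = n / 256 = 4 consecutive entries of D into D[tid]; the
// tree loop then halves the active range (128, 64, ..., 1) so the global
// minimum ends up in D[0], which thread 0 copies to *Glob.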
// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
int main(int argc, char* argv[]) {
// Host Data
float * hVx; // host x-coordinate array
float * hVy; // host y-coordinate array
float * hmin_dist; // minimum value on host
// Device Data
float * dVx; // device x-coordinate array
float * dVy; // device x-coordinate array
float * dmin_dist; // minimum value on device
float * dVd;
int i, j, size, num_points, threads, blocks;
float dx, dy, Dij, distance;
unsigned int seed = 0;
cudaEvent_t start, stop; // GPU timing variables
struct timeval cpu_start, cpu_stop; // CPU timing variables
float time_array[10];
// Timing initializations
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Check input
if (argc != 2) {
printf("Use: %s <number of points>\n", argv[0]);
exit(0);
}
if ((num_points = atoi(argv[argc - 1])) > MAX_POINTS) {
printf("Maximum number of points allowed: %d\n", MAX_POINTS);
exit(0);
}
// Allocate host coordinate arrays
size = num_points * sizeof(float);
hVx = (float *) malloc(size);
hVy = (float *) malloc(size);
hmin_dist = (float *) malloc(sizeof(float));
// Initialize points
for (i = 0; i < num_points; i++) {
hVx[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
hVy[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
}
// Allocate device coordinate arrays
cudaMalloc(&dVx, size);
cudaMalloc(&dVy, size);
cudaMalloc(&dmin_dist, sizeof(float));
cudaMalloc(&dVd, size);
// Copy coordinate arrays from host memory to device memory
cudaEventRecord(start, 0);
cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&(time_array[0]), start, stop);
// Invoke kernel
cudaEventRecord(start, 0);
threads = 256;
blocks = 1;
if (num_points > threads) {
		blocks = (num_points + threads - 1) / threads; // round up so no point is left without a thread
}
minimum_distance<<<blocks,threads>>>(dVx, dVy, dVd, dmin_dist, num_points);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&(time_array[1]), start, stop);
// Copy result from device memory to host memory
cudaEventRecord(start, 0);
cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&(time_array[2]), start, stop);
printf("Number of Points = %d\n", num_points);
printf("GPU Host-to-device = %f ms \n", time_array[0]);
printf("GPU execution time = %f ms \n", time_array[1]);
printf("GPU Device-to-host = %f ms \n", time_array[2]);
printf("Minimum distance (GPU) = %e\n", hmin_dist[0]);
// Compute minimum distance on host to check device computation
gettimeofday(&cpu_start, NULL);
dx = hVx[1] - hVx[0];
dy = hVy[1] - hVy[0];
distance = sqrtf(dx * dx + dy * dy);
for (i = 0; i < num_points; i++) {
for (j = i + 1; j < num_points; j++) {
dx = hVx[j] - hVx[i];
dy = hVy[j] - hVy[i];
Dij = sqrtf(dx * dx + dy * dy);
if (distance > Dij)
distance = Dij;
}
}
gettimeofday(&cpu_stop, NULL);
time_array[3] = 1000 * (cpu_stop.tv_sec - cpu_start.tv_sec)
+ 0.000001 * (cpu_stop.tv_usec - cpu_start.tv_usec);
printf("CPU execution time = %f ms\n", time_array[3]);
printf("Minimum distance (CPU) = %e\n", distance);
// Free device memory
cudaFree(dVx);
cudaFree(dVy);
cudaFree(dmin_dist);
cudaFree(dVd);
// Free host memory
free(hVx);
free(hVy);
free(hmin_dist);
return 0;
}
|
6e7c56146ecb743ef5080de742e714393c04c096.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice1D.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void histogramme(int* ptrDevTabData, int *ptrDevTabResult, int tabSize, int dataMax);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void reductionIntraThread(int *ptrDevTabData, int tabSize, int *tabSM);
static __device__ void reductionInterBlock(int* tabSM, int* ptrDevTabResult, int dataMax);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void histogramme(int* ptrDevTabData, int *ptrDevTabResult, int tabSize, int dataMax)
{
extern __shared__ int tabSM[];
if(Indice2D::tidLocal() == 0)
{
for(int i=0; i<dataMax; i++)
{
tabSM[i] = 0;
}
}
__syncthreads();
reductionIntraThread(ptrDevTabData, tabSize, tabSM);
__syncthreads();
reductionInterBlock(tabSM, ptrDevTabResult, dataMax);
}
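// A host-side launch sketch, not taken from the original project: because
// tabSM is declared extern __shared__, the launch must reserve
// dataMax * sizeof(int) bytes of dynamic shared memory per block. The grid and
// block sizes and the name launchHistogramme are illustrative placeholders.
static void launchHistogramme(int* ptrDevTabData, int* ptrDevTabResult, int tabSize, int dataMax)
	{
	dim3 dg(64, 1, 1);   // assumed grid size
	dim3 db(256, 1, 1);  // assumed block size
	size_t smem = dataMax * sizeof(int);
	hipLaunchKernelGGL((histogramme), dg, db, smem, 0, ptrDevTabData, ptrDevTabResult, tabSize, dataMax);
	}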
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reductionIntraThread(int *ptrDevTabData, int tabSize, int *tabSM)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
int s = TID;
while (s < tabSize)
{
atomicAdd(&tabSM[ptrDevTabData[s]], 1);
s += NB_THREAD;
}
}
__device__ void reductionInterBlock(int* tabSM, int* ptrDevTabResult, int dataMax)
{
if(Indice2D::tidLocal() == 0)
{
for(int i=0; i < dataMax; i++)
{
atomicAdd(&ptrDevTabResult[i], tabSM[i]);
}
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 6e7c56146ecb743ef5080de742e714393c04c096.cu | #include "Indice1D.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void histogramme(int* ptrDevTabData, int *ptrDevTabResult, int tabSize, int dataMax);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void reductionIntraThread(int *ptrDevTabData, int tabSize, int *tabSM);
static __device__ void reductionInterBlock(int* tabSM, int* ptrDevTabResult, int dataMax);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void histogramme(int* ptrDevTabData, int *ptrDevTabResult, int tabSize, int dataMax)
{
extern __shared__ int tabSM[];
if(Indice2D::tidLocal() == 0)
{
for(int i=0; i<dataMax; i++)
{
tabSM[i] = 0;
}
}
__syncthreads();
reductionIntraThread(ptrDevTabData, tabSize, tabSM);
__syncthreads();
reductionInterBlock(tabSM, ptrDevTabResult, dataMax);
}
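// A host-side launch sketch, not taken from the original project: because
// tabSM is declared extern __shared__, the launch must reserve
// dataMax * sizeof(int) bytes of dynamic shared memory per block. The grid and
// block sizes and the name launchHistogramme are illustrative placeholders.
static void launchHistogramme(int* ptrDevTabData, int* ptrDevTabResult, int tabSize, int dataMax)
	{
	dim3 dg(64, 1, 1);   // assumed grid size
	dim3 db(256, 1, 1);  // assumed block size
	size_t smem = dataMax * sizeof(int);
	histogramme<<<dg, db, smem>>>(ptrDevTabData, ptrDevTabResult, tabSize, dataMax);
	}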
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reductionIntraThread(int *ptrDevTabData, int tabSize, int *tabSM)
{
const int NB_THREAD = Indice2D::nbThread();
const int TID = Indice2D::tid();
int s = TID;
while (s < tabSize)
{
atomicAdd(&tabSM[ptrDevTabData[s]], 1);
s += NB_THREAD;
}
}
__device__ void reductionInterBlock(int* tabSM, int* ptrDevTabResult, int dataMax)
{
if(Indice2D::tidLocal() == 0)
{
for(int i=0; i < dataMax; i++)
{
atomicAdd(&ptrDevTabResult[i], tabSM[i]);
}
}
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
e6bc65674700490fb834b01f5279251d9e5a1a4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <math.h>
#include "rte.h"
#include <pthread.h>
__host__ Info_Stat * populate_info_dev();
Geometry *geom;
Phantom *phan;
Source *beam_src;
complex_double *diag_terms_host;
complex_double *sph_harm;
Info_Stat *info_stat_host;
SHORT nL;
int nTerms;
__host__ int get_vind_phanind_host(int dep, int row, int col){
return ((geom->bounZ + dep) * (geom->nX + 2 * geom->bounX ) * (geom->nY + 2 * geom->bounY ) /* reached the correct layer */ + ( geom->bounY + row)* (geom->nX + 2 * geom->bounX ) + (geom->bounX + col));
}
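// Worked example with illustrative values (not from the original source): with
// nX = nY = 4 and bounX = bounY = bounZ = 1, each padded slice is 6 x 6 voxels,
// so the voxel at (dep = 0, row = 0, col = 0) maps to linear index
// (1)*6*6 + (1)*6 + 1 = 43.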
__host__ Info_Stat * populate_info_dev(){
Info_Stat *info_stat_host;
info_stat_host = (Info_Stat *) malloc (sizeof(Info_Stat));
info_stat_host->nX = geom->nX;
info_stat_host->nY = geom->nY;
info_stat_host->nZ= geom->nZ;
info_stat_host->bounX = geom->bounX;
info_stat_host->bounY = geom->bounY;
info_stat_host->bounZ= geom->bounZ;
info_stat_host->subbounX = ceilf ((geom->sub_thresh)/(geom->delX));
info_stat_host->subbounY = ceilf ((geom->sub_thresh)/(geom->delY));
info_stat_host->subbounZ = ceilf ((geom->sub_thresh)/(geom->delZ));
info_stat_host->delX = geom->delX;
info_stat_host->delY = geom->delY;
info_stat_host->delZ= geom->delZ;
info_stat_host->x_min = geom->x_min;
info_stat_host->y_min = geom->y_min;
info_stat_host->z_min = geom->z_min;
info_stat_host->x_max = geom->x_max;
info_stat_host->y_max = geom->y_max;
info_stat_host->z_max = geom->z_max;
info_stat_host->sub_thresh = geom->sub_thresh;
info_stat_host->prop_thresh = geom->prop_thresh;
info_stat_host->sub_vox = geom->sub_vox;
info_stat_host->self_sub_vox = geom->self_sub_vox;
info_stat_host->g = phan->g;
info_stat_host->n = phan->n;
info_stat_host->no_tiss = phan->no_tiss;
info_stat_host->cm = C/phan->n;
info_stat_host->no_vox = geom->no_vox;
int i;
for(i=0; i < phan->no_tiss; i++){
info_stat_host->mu_tot[i] = phan->mu_abs[i] + phan->mu_sc[i];
info_stat_host->mu_sc[i] = phan->mu_sc[i];
}
return info_stat_host;
}
int main(int argc, char** argv )
{
time_t start_time, end_time;
time(&start_time);
dictionary *ini;
	/* Initialize all pointers to NULL so the unconditional free() calls at the
	   end of main() are safe for the ones that are only assigned inside the
	   disabled (#if 0) block. */
	complex_double *gbar1 = NULL, *gbar0 = NULL, *gbar = NULL, *dgbara = NULL, *dgbars = NULL;
	complex_double *W = NULL, *W1 = NULL, *out = NULL, *out2 = NULL, *out3 = NULL, *src = NULL, *tmp1 = NULL, *df = NULL, *src2 = NULL, *out4 = NULL;
flt_doub *g;
flt_doub *grada,*grads;
flt_doub delg;
int n;
int j,jj;
int jnk;
int tiss_idx;
int iip, jjp, kkp;
int r_ind,i;
int size;
if (argc != 2) {
printf("\n InverseRTE file.par\n");
printf(" file.par is the parameter file.\n\n");
exit(1);
}
// Load in the initialization file
ini = iniparser_load(argv[1]);
// Set up the geometry, phantom, etc
printf("Loading in geometry and phantom information...\n");
geom = LoadGeometry(ini);
phan = LoadPhantom(ini,1);
beam_src = LoadSource(ini);
printf("Done reading source information \n");
nL = iniparser_getint(ini,"Algorithm:nL",-1);
nTerms = iniparser_getint(ini,"Algorithm:nTerms",1);
int jnk2;
FILE *gFile;
if ((gFile = fopen(iniparser_getstring(ini,"Runtime:gFile",NULL),"r")) == NULL){
printf("Error in opening gfile. Exiting \n");
exit(0);
}
//printf("%s is gFile \n", gFile);
fread(&jnk,sizeof(int),1,gFile);
fread(&jnk2,sizeof(int),1,gFile);
printf("Done reading gfile integers %d and %d \n", jnk, jnk2);
g = (flt_doub *) malloc(sizeof(flt_doub) * geom->nX * geom->nY);
fread(g,sizeof(flt_doub),geom->nX * geom->nY,gFile);
fclose(gFile);
info_stat_host = populate_info_dev();
size = (nL+1)*(nL+1)* geom->no_vox;
printf("Generating the spherical harmonic terms \n");
generate_sph_harm_terms();
unsigned int timer;
int cnt,k;
flt_doub tmp;
grada = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads = (flt_doub *)malloc(sizeof(flt_doub)* phan->no_tiss);
int r_ind_phan;
W = alloc_dist();
out2 = alloc_dist();
out3 = alloc_dist();
out4 = alloc_dist();
src = alloc_dist();
src2 = alloc_dist();
tmp1 = alloc_dist();
int abs_ind, sc_ind;
int max_ind = 10;
flt_doub *mua, *mus;
mua = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
mus = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
flt_doub *fisher_mat;
fisher_mat = (flt_doub*) malloc(sizeof(flt_doub)*4);
flt_doub *x,*y,*z;
x = (flt_doub *)malloc(sizeof(flt_doub)*geom->nX);
y = (flt_doub *)malloc(sizeof(flt_doub)*geom->nY);
z = (flt_doub *)malloc(sizeof(flt_doub)*geom->nZ);
flt_doub obj_fov_size;
obj_fov_size = geom->x_max - geom->x_min;
for(i=0; i<geom->nX; i++){
x[i] = i*obj_fov_size/(geom->nX) - obj_fov_size/2 + obj_fov_size/(2*geom->nX);
y[i] = i*obj_fov_size/(geom->nY) - obj_fov_size/2 + obj_fov_size/(2*geom->nY); // Change both of these to obj->nX-1 and obj->nY-1, respectively?
z[i] = i*obj_fov_size/(geom->nZ) + obj_fov_size/(2*geom->nZ); // Change both of these to obj->nX-1 and obj->nY-1, respectively?
}
FILE *abs_fid, *sc_fid, *grada_fid, *grads_fid, *res_fid, *fim_fid, *snr_fid;
abs_fid = fopen("abs_co_fim.dat","w");
sc_fid = fopen("sc_co_fim.dat","w");
grada_fid = fopen("grada_terms_fim.dat","w");
grads_fid = fopen("grads_terms_fim.dat","w");
res_fid = fopen("res_terms_fim.dat","w");
fim_fid = fopen("fim_terms.dat", "w");
snr_fid = fopen("snr_terms.dat", "w");
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
mus[sc_ind] = sc_ind*0.25 + 0.25;
}
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
mua[abs_ind] = abs_ind*0.0025 + 0.0025;
}
fwrite(mua, sizeof(flt_doub),max_ind, abs_fid);
fwrite(mus, sizeof(flt_doub),max_ind, sc_fid);
int rad_ind, max_rad_ind;
float* rad_sig;
max_rad_ind = 5;
rad_sig = (float *) malloc(sizeof(float)*max_rad_ind);
for (i=0; i< max_rad_ind; i++){
rad_sig[i] = (i+1)*obj_fov_size/(2*geom->nX);
}
rad_ind = 2;
int loc_indx, loc_indy, loc_indz;
int hyp_no;
flt_doub small_sig = 0.1;
flt_doub *snr_sq;
int initz = 2;
snr_sq = (flt_doub* ) malloc(sizeof(flt_doub)*(geom->nZ-initz));
int ind;
	for(loc_indz = initz; loc_indz < 16; loc_indz++){
// for(loc_indy = 0; loc_indy < geom->nY; loc_indy++){
// for(loc_indx = 0; loc_indx < geom->nX; loc_indx++){
loc_indy = geom->nY/2;
loc_indx = geom->nX/2;
for (i=0; i<geom->nZ; i++){
for (j=0; j<geom->nY; j++){
for (k=0; k<geom->nX; k++){
r_ind = i*geom->nX*geom->nY + j*geom->nX + k;
if(((x[k] - x[loc_indx])*(x[k]-x[loc_indx]) + (y[j]-y[loc_indy])*(y[j]-y[loc_indy]) + (z[i] - z[loc_indz])*(z[i] - z[loc_indz])) < rad_sig[rad_ind]*rad_sig[rad_ind])
phan->tiss_type[r_ind] = 2;
else
phan->tiss_type[r_ind] = 1;
}}}
for(hyp_no=1; hyp_no >= 0; hyp_no--){
for(tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++){
if(tiss_idx == 2){
phan->mu_abs[tiss_idx] = 0.25 + small_sig*hyp_no; // mua[abs_ind];
phan->mu_sc[tiss_idx] = 1.0;
}
}
populate_info_dev();
generate_diag_terms_host();
generate_source_beam(src);
copy_dist(src,out2);
copy_dist(src,W);
Neumann(W,out2,1);
if(hyp_no == 1){
printf("Generating signal present image\n");
gbar1 = generate_ref_image(out2);
}
else{
printf("Generating signal absent image \n");
gbar0 = generate_ref_image(out2);
}
}
snr_sq[loc_indz-initz] = 0.0;
printf("Computing the SNR \n");
for (ind=0; ind<geom->nX*geom->nY; ind++){
snr_sq[loc_indz-initz] += ((gbar1[ind].real() - gbar0[ind].real())*(gbar1[ind].real() - gbar0[ind].real()))/(gbar0[ind].real());
// if(fabs(gbar1[ind].real() - gbar0[ind].real())>1e-15)
// printf("%lf %lf \n ", gbar1[ind].real(), gbar0[ind].real());
if(ind == geom->nX*(geom->nY/2) + geom->nX/2 )
printf("%f is gbar1 and %f is gbar0 for voxel %d \n", gbar1[ind].real(), gbar0[ind].real(), ind);
}
printf("%e is snr_sq for %d as z index\n", snr_sq[loc_indz-initz], loc_indz);
#if 0
//printf("Compute the gradient with respect to mua");
for (tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++) {
copy_dist(out2,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
if(!(iip == loc_indz && jjp == loc_indy && kkp == loc_indx)){
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n,(nL+1)*(nL+1))] = 0+0*I;
}
}
else
printf("%d %d %d \n", iip, jjp, kkp);
}}}
scale_dist(out3,-1.0*C);
copy_dist(out3,src);
Neumann(src,out3,1);
dgbara = generate_trans_image(out3,0);
grada[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
grada[tiss_idx] = grada[tiss_idx] - 2*(g[j]-gbar[j].real())*dgbara[j].real();
}
}
//printf("Compute the gradient with respect to mus \n");
for (tiss_idx = 1; tiss_idx < phan->no_tiss ; tiss_idx++) {
copy_dist(out2,out3);
scale_dist(out3,-1.0*C);
copy_dist(out2,tmp1);
PropScatmu1(geom,phan,nL,tmp1);
add_dist(tmp1,out3,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
if(!(iip == loc_indz && jjp == loc_indy && kkp == loc_indx)){
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n, (nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
src = alloc_dist();
copy_dist(out3,src);
Neumann(src,out3,1);
dgbars = generate_trans_image(out3,0);
grads[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
grads[tiss_idx] = grads[tiss_idx] - (g[j]-gbar[j].real())*dgbars[j].real();
}
}
memset(fisher_mat, 0, sizeof(flt_doub)*4);
for(j = 0;j<geom->nX*geom->nY;j++){
if(gbar[j].real()){
fisher_mat[0] += (1/gbar[j].real()) * dgbars[j].real()*dgbars[j].real();
fisher_mat[1] += (1/gbar[j].real()) * dgbars[j].real()*dgbara[j].real();
fisher_mat[2] += (1/gbar[j].real()) * dgbars[j].real()*dgbara[j].real();
fisher_mat[3] += (1/gbar[j].real()) * dgbara[j].real()*dgbara[j].real();
}
}
fwrite(fisher_mat, sizeof(flt_doub), 4, fim_fid);
fwrite(grads, sizeof(flt_doub), 1, grads_fid);
fwrite(grada, sizeof(flt_doub), 1, grada_fid);
delg = 0.0;
for (i=0;i<geom->nX*geom->nY;i++) {
delg = delg + (gbar[i].real()-g[i])*(gbar[i].real()-g[i]);
}
printf("Residual = %e\n",(delg));
fwrite(&delg,sizeof(flt_doub),1,res_fid);
}
}
#endif
}
fwrite(snr_sq, sizeof(flt_doub),geom->nZ-initz, snr_fid);
fclose(snr_fid);
fclose(abs_fid);
fclose(sc_fid);
fclose(grada_fid);
fclose(grads_fid);
fclose(res_fid);
fclose(fim_fid);
free(g);
free(beam_src);
free(phan);
free(geom);
free(gbar);
free(dgbara);
free(dgbars);
free(src);
free(src2);
free(out);
free(out2);
free(out3);
free(out4);
free(fisher_mat);
iniparser_freedict(ini);
time(&end_time);
printf("\n*------------------------------------------*\n");
	printf("\nThe total time taken by the code = %ld sec \n", (long)(end_time - start_time));
printf("\n*------------------------------------------*\n");
return(0);
}
| e6bc65674700490fb834b01f5279251d9e5a1a4d.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <math.h>
#include "rte.h"
#include <pthread.h>
__host__ Info_Stat * populate_info_dev();
Geometry *geom;
Phantom *phan;
Source *beam_src;
complex_double *diag_terms_host;
complex_double *sph_harm;
Info_Stat *info_stat_host;
SHORT nL;
int nTerms;
__host__ int get_vind_phanind_host(int dep, int row, int col){
return ((geom->bounZ + dep) * (geom->nX + 2 * geom->bounX ) * (geom->nY + 2 * geom->bounY ) /* reached the correct layer */ + ( geom->bounY + row)* (geom->nX + 2 * geom->bounX ) + (geom->bounX + col));
}
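// Worked example with illustrative values (not from the original source): with
// nX = nY = 4 and bounX = bounY = bounZ = 1, each padded slice is 6 x 6 voxels,
// so the voxel at (dep = 0, row = 0, col = 0) maps to linear index
// (1)*6*6 + (1)*6 + 1 = 43.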
__host__ Info_Stat * populate_info_dev(){
Info_Stat *info_stat_host;
info_stat_host = (Info_Stat *) malloc (sizeof(Info_Stat));
info_stat_host->nX = geom->nX;
info_stat_host->nY = geom->nY;
info_stat_host->nZ= geom->nZ;
info_stat_host->bounX = geom->bounX;
info_stat_host->bounY = geom->bounY;
info_stat_host->bounZ= geom->bounZ;
info_stat_host->subbounX = ceilf ((geom->sub_thresh)/(geom->delX));
info_stat_host->subbounY = ceilf ((geom->sub_thresh)/(geom->delY));
info_stat_host->subbounZ = ceilf ((geom->sub_thresh)/(geom->delZ));
info_stat_host->delX = geom->delX;
info_stat_host->delY = geom->delY;
info_stat_host->delZ= geom->delZ;
info_stat_host->x_min = geom->x_min;
info_stat_host->y_min = geom->y_min;
info_stat_host->z_min = geom->z_min;
info_stat_host->x_max = geom->x_max;
info_stat_host->y_max = geom->y_max;
info_stat_host->z_max = geom->z_max;
info_stat_host->sub_thresh = geom->sub_thresh;
info_stat_host->prop_thresh = geom->prop_thresh;
info_stat_host->sub_vox = geom->sub_vox;
info_stat_host->self_sub_vox = geom->self_sub_vox;
info_stat_host->g = phan->g;
info_stat_host->n = phan->n;
info_stat_host->no_tiss = phan->no_tiss;
info_stat_host->cm = C/phan->n;
info_stat_host->no_vox = geom->no_vox;
int i;
for(i=0; i < phan->no_tiss; i++){
info_stat_host->mu_tot[i] = phan->mu_abs[i] + phan->mu_sc[i];
info_stat_host->mu_sc[i] = phan->mu_sc[i];
}
return info_stat_host;
}
int main(int argc, char** argv )
{
time_t start_time, end_time;
time(&start_time);
dictionary *ini;
	/* Initialize all pointers to NULL so the unconditional free() calls at the
	   end of main() are safe for the ones that are only assigned inside the
	   disabled (#if 0) block. */
	complex_double *gbar1 = NULL, *gbar0 = NULL, *gbar = NULL, *dgbara = NULL, *dgbars = NULL;
	complex_double *W = NULL, *W1 = NULL, *out = NULL, *out2 = NULL, *out3 = NULL, *src = NULL, *tmp1 = NULL, *df = NULL, *src2 = NULL, *out4 = NULL;
flt_doub *g;
flt_doub *grada,*grads;
flt_doub delg;
int n;
int j,jj;
int jnk;
int tiss_idx;
int iip, jjp, kkp;
int r_ind,i;
int size;
if (argc != 2) {
printf("\n InverseRTE file.par\n");
printf(" file.par is the parameter file.\n\n");
exit(1);
}
// Load in the initialization file
ini = iniparser_load(argv[1]);
// Set up the geometry, phantom, etc
printf("Loading in geometry and phantom information...\n");
geom = LoadGeometry(ini);
phan = LoadPhantom(ini,1);
beam_src = LoadSource(ini);
printf("Done reading source information \n");
nL = iniparser_getint(ini,"Algorithm:nL",-1);
nTerms = iniparser_getint(ini,"Algorithm:nTerms",1);
int jnk2;
FILE *gFile;
if ((gFile = fopen(iniparser_getstring(ini,"Runtime:gFile",NULL),"r")) == NULL){
printf("Error in opening gfile. Exiting \n");
exit(0);
}
//printf("%s is gFile \n", gFile);
fread(&jnk,sizeof(int),1,gFile);
fread(&jnk2,sizeof(int),1,gFile);
printf("Done reading gfile integers %d and %d \n", jnk, jnk2);
g = (flt_doub *) malloc(sizeof(flt_doub) * geom->nX * geom->nY);
fread(g,sizeof(flt_doub),geom->nX * geom->nY,gFile);
fclose(gFile);
info_stat_host = populate_info_dev();
size = (nL+1)*(nL+1)* geom->no_vox;
printf("Generating the spherical harmonic terms \n");
generate_sph_harm_terms();
unsigned int timer;
int cnt,k;
flt_doub tmp;
grada = (flt_doub *)malloc(sizeof(flt_doub)*phan->no_tiss);
grads = (flt_doub *)malloc(sizeof(flt_doub)* phan->no_tiss);
int r_ind_phan;
W = alloc_dist();
out2 = alloc_dist();
out3 = alloc_dist();
out4 = alloc_dist();
src = alloc_dist();
src2 = alloc_dist();
tmp1 = alloc_dist();
int abs_ind, sc_ind;
int max_ind = 10;
flt_doub *mua, *mus;
mua = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
mus = (flt_doub *) malloc(sizeof(flt_doub)*max_ind);
flt_doub *fisher_mat;
fisher_mat = (flt_doub*) malloc(sizeof(flt_doub)*4);
flt_doub *x,*y,*z;
x = (flt_doub *)malloc(sizeof(flt_doub)*geom->nX);
y = (flt_doub *)malloc(sizeof(flt_doub)*geom->nY);
z = (flt_doub *)malloc(sizeof(flt_doub)*geom->nZ);
flt_doub obj_fov_size;
obj_fov_size = geom->x_max - geom->x_min;
for(i=0; i<geom->nX; i++){
x[i] = i*obj_fov_size/(geom->nX) - obj_fov_size/2 + obj_fov_size/(2*geom->nX);
y[i] = i*obj_fov_size/(geom->nY) - obj_fov_size/2 + obj_fov_size/(2*geom->nY); // Change both of these to obj->nX-1 and obj->nY-1, respectively?
z[i] = i*obj_fov_size/(geom->nZ) + obj_fov_size/(2*geom->nZ); // Change both of these to obj->nX-1 and obj->nY-1, respectively?
}
FILE *abs_fid, *sc_fid, *grada_fid, *grads_fid, *res_fid, *fim_fid, *snr_fid;
abs_fid = fopen("abs_co_fim.dat","w");
sc_fid = fopen("sc_co_fim.dat","w");
grada_fid = fopen("grada_terms_fim.dat","w");
grads_fid = fopen("grads_terms_fim.dat","w");
res_fid = fopen("res_terms_fim.dat","w");
fim_fid = fopen("fim_terms.dat", "w");
snr_fid = fopen("snr_terms.dat", "w");
for(sc_ind = 0; sc_ind < max_ind; sc_ind++){
mus[sc_ind] = sc_ind*0.25 + 0.25;
}
for(abs_ind=0; abs_ind< max_ind; abs_ind++){
mua[abs_ind] = abs_ind*0.0025 + 0.0025;
}
fwrite(mua, sizeof(flt_doub),max_ind, abs_fid);
fwrite(mus, sizeof(flt_doub),max_ind, sc_fid);
int rad_ind, max_rad_ind;
float* rad_sig;
max_rad_ind = 5;
rad_sig = (float *) malloc(sizeof(float)*max_rad_ind);
for (i=0; i< max_rad_ind; i++){
rad_sig[i] = (i+1)*obj_fov_size/(2*geom->nX);
}
rad_ind = 2;
int loc_indx, loc_indy, loc_indz;
int hyp_no;
flt_doub small_sig = 0.1;
flt_doub *snr_sq;
int initz = 2;
snr_sq = (flt_doub* ) malloc(sizeof(flt_doub)*(geom->nZ-initz));
int ind;
	for(loc_indz = initz; loc_indz < 16; loc_indz++){
// for(loc_indy = 0; loc_indy < geom->nY; loc_indy++){
// for(loc_indx = 0; loc_indx < geom->nX; loc_indx++){
loc_indy = geom->nY/2;
loc_indx = geom->nX/2;
for (i=0; i<geom->nZ; i++){
for (j=0; j<geom->nY; j++){
for (k=0; k<geom->nX; k++){
r_ind = i*geom->nX*geom->nY + j*geom->nX + k;
if(((x[k] - x[loc_indx])*(x[k]-x[loc_indx]) + (y[j]-y[loc_indy])*(y[j]-y[loc_indy]) + (z[i] - z[loc_indz])*(z[i] - z[loc_indz])) < rad_sig[rad_ind]*rad_sig[rad_ind])
phan->tiss_type[r_ind] = 2;
else
phan->tiss_type[r_ind] = 1;
}}}
for(hyp_no=1; hyp_no >= 0; hyp_no--){
for(tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++){
if(tiss_idx == 2){
phan->mu_abs[tiss_idx] = 0.25 + small_sig*hyp_no; // mua[abs_ind];
phan->mu_sc[tiss_idx] = 1.0;
}
}
populate_info_dev();
generate_diag_terms_host();
generate_source_beam(src);
copy_dist(src,out2);
copy_dist(src,W);
Neumann(W,out2,1);
if(hyp_no == 1){
printf("Generating signal present image\n");
gbar1 = generate_ref_image(out2);
}
else{
printf("Generating signal absent image \n");
gbar0 = generate_ref_image(out2);
}
}
snr_sq[loc_indz-initz] = 0.0;
printf("Computing the SNR \n");
for (ind=0; ind<geom->nX*geom->nY; ind++){
snr_sq[loc_indz-initz] += ((gbar1[ind].real() - gbar0[ind].real())*(gbar1[ind].real() - gbar0[ind].real()))/(gbar0[ind].real());
// if(fabs(gbar1[ind].real() - gbar0[ind].real())>1e-15)
// printf("%lf %lf \n ", gbar1[ind].real(), gbar0[ind].real());
if(ind == geom->nX*(geom->nY/2) + geom->nX/2 )
printf("%f is gbar1 and %f is gbar0 for voxel %d \n", gbar1[ind].real(), gbar0[ind].real(), ind);
}
printf("%e is snr_sq for %d as z index\n", snr_sq[loc_indz-initz], loc_indz);
#if 0
//printf("Compute the gradient with respect to mua");
for (tiss_idx = 1; tiss_idx < phan->no_tiss; tiss_idx++) {
copy_dist(out2,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
if(!(iip == loc_indz && jjp == loc_indy && kkp == loc_indx)){
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n,(nL+1)*(nL+1))] = 0+0*I;
}
}
else
printf("%d %d %d \n", iip, jjp, kkp);
}}}
scale_dist(out3,-1.0*C);
copy_dist(out3,src);
Neumann(src,out3,1);
dgbara = generate_trans_image(out3,0);
grada[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
grada[tiss_idx] = grada[tiss_idx] - 2*(g[j]-gbar[j].real())*dgbara[j].real();
}
}
//printf("Compute the gradient with respect to mus \n");
for (tiss_idx = 1; tiss_idx < phan->no_tiss ; tiss_idx++) {
copy_dist(out2,out3);
scale_dist(out3,-1.0*C);
copy_dist(out2,tmp1);
PropScatmu1(geom,phan,nL,tmp1);
add_dist(tmp1,out3,out3);
for (iip=0; iip<geom->nZ; iip++) {
for (jjp=0; jjp<geom->nY; jjp++) {
for (kkp=0; kkp<geom->nX; kkp++) {
if(!(iip == loc_indz && jjp == loc_indy && kkp == loc_indx)){
r_ind = (iip + geom->bounZ)* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + (jjp + geom->bounY)* (geom->nX + 2*geom->bounX) + (kkp + geom->bounX);
for (n=0;n<(nL+1)*(nL+1);n++) {
out3[VOX_TO_SPIND(r_ind, n, (nL+1)*(nL+1))] = 0+0*I;
}
}
}}}
src = alloc_dist();
copy_dist(out3,src);
Neumann(src,out3,1);
dgbars = generate_trans_image(out3,0);
grads[tiss_idx] = 0.0;
for (j = 0;j<geom->nX*geom->nY;j++) {
grads[tiss_idx] = grads[tiss_idx] - (g[j]-gbar[j].real())*dgbars[j].real();
}
}
memset(fisher_mat, 0, sizeof(flt_doub)*4);
for(j = 0;j<geom->nX*geom->nY;j++){
if(gbar[j].real()){
fisher_mat[0] += (1/gbar[j].real()) * dgbars[j].real()*dgbars[j].real();
fisher_mat[1] += (1/gbar[j].real()) * dgbars[j].real()*dgbara[j].real();
fisher_mat[2] += (1/gbar[j].real()) * dgbars[j].real()*dgbara[j].real();
fisher_mat[3] += (1/gbar[j].real()) * dgbara[j].real()*dgbara[j].real();
}
}
fwrite(fisher_mat, sizeof(flt_doub), 4, fim_fid);
fwrite(grads, sizeof(flt_doub), 1, grads_fid);
fwrite(grada, sizeof(flt_doub), 1, grada_fid);
delg = 0.0;
for (i=0;i<geom->nX*geom->nY;i++) {
delg = delg + (gbar[i].real()-g[i])*(gbar[i].real()-g[i]);
}
printf("Residual = %e\n",(delg));
fwrite(&delg,sizeof(flt_doub),1,res_fid);
}
}
#endif
}
fwrite(snr_sq, sizeof(flt_doub),geom->nZ-initz, snr_fid);
fclose(snr_fid);
fclose(abs_fid);
fclose(sc_fid);
fclose(grada_fid);
fclose(grads_fid);
fclose(res_fid);
fclose(fim_fid);
free(g);
free(beam_src);
free(phan);
free(geom);
free(gbar);
free(dgbara);
free(dgbars);
free(src);
free(src2);
free(out);
free(out2);
free(out3);
free(out4);
free(fisher_mat);
iniparser_freedict(ini);
time(&end_time);
printf("\n*------------------------------------------*\n");
printf("\nThe total time taken by the code = %d sec \n", end_time - start_time);
printf("\n*------------------------------------------*\n");
return(0);
}
|
f826abf6908ebea1509f0c7225c99f2f8d1e962b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "tools.h"
#include <time.h>
#define N 172032 //131072
#define M 672
#define THRESHOLD 100000
#define GSIZE 1000
#define PNTSIZE 300
#define AUTO_END 10
#define ORTHO 80000
#define RADIUS 1500
#define RADSCALE 1000000000
#define LINE_SIZE 0.3
#define NCOMPASS 360
#define d2r(deg) (deg * PI / 180.0)
#define kill(s) (s->dead = true)
#define PI 3.14159265358979323846
#define MAPX 1273623389
#define MAPY 363706170
#define True 1
#define False 0
#define DELIM "\t"
int nnum, bnum, fnum;
int point_mode = 0;
int width = 800, height = 800;
signal sig[N];
int compass[NCOMPASS];
int count[NCOMPASS];
int selection_mode = 0; //generator: 0, detector: 1
clock_t total_testing_time = 0;
node *Nodes;
polygon *Buildings;
polygon *Forests;
my_t mapx = 0; //1273623389;
my_t mapy = 0; //363706170;
line ga;
int toggle[10];
void load_file();
void clean_up();
void initialize();
__global__ void signal_calculation(signal *signal_list,
const node *node_list, const polygon *building_list, const polygon *forest_list, const line *gtoa) {
my_t gx = gtoa->x1;
my_t gy = gtoa->y1;
my_t ax = gtoa->x2;
my_t ay = gtoa->y2;
my_t zx, zy;
int i = threadIdx.x + (blockIdx.x * blockDim.x);
my_t px, py, test, tdist = 0, kdist = 0;
signal sigref, sigblk;
bool possible;
signal *si = &signal_list[i];
int autoend = -1;
while (!si->dead && ++autoend < AUTO_END) {
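// Each pass handles one ray segment: test for detection within RADIUS of the endpoint (ax, ay), find the nearest reflecting building edge and the nearest blocking forest edge, then decide which event happens first; AUTO_END caps the number of bounces.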
si->d = si->vy*si->x - si->vx*si->y;
// case of detection
possible = false;
my_t d = (-si->vy*ax + si->vx*ay + si->d) / RADSCALE;
if (-RADIUS <= d && d <= RADIUS) {
if (si->vx != 0) {
px = ax + (d*si->vy / RADSCALE);
test = (px - si->x) ^ si->vx;
}
else {
py = ay - (d*si->vx / RADSCALE);
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
possible = true;
zx = (si->x - ax);
zy = (si->y - ay);
tdist = zx*zx + zy*zy;
}
}
// reflection test
int n1, n2;
int j, k;
my_t test, kdist;
my_t lx1, ly1, lx2, ly2;
my_t Tnx, Tny, Td, pr;
sigref.dead = true;
int eid;
for (j = 0; j < gtoa->bnum; j++) {
// calculate reflection
const polygon *p = &building_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
eid = 100 * i + k;
if (si->eid == eid) continue;
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);
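// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))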
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) {
continue;
}
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (kdist < 10) continue;
if (sigref.dead || sigref.ss > kdist) { //if marked as alive
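// keep the nearest reflecting edge and mirror the direction about the edge normal n = (-(ly2-ly1), lx2-lx1): v' = v - 2*(v.n)*n / |n|^2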
my_t lnx = -(ly2 - ly1);
my_t lny = (lx2 - lx1);
my_t nv = lnx*si->vx + lny*si->vy;
sigref.x = px;
sigref.y = py;
sigref.vx = si->vx - 2 * nv * lnx / (lnx*lnx + lny*lny);
sigref.vy = si->vy - 2 * nv * lny / (lnx*lnx + lny*lny);
sigref.ss = kdist;
sigref.eid = eid;
sigref.dead = false;
}
}
}
}
}
}
// blocking test
sigblk.dead = false;
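// Note: sigblk.dead is used with the opposite meaning of its name - it is set to true once a blocking forest edge has been found.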
for (j = 0; j < gtoa->fnum; j++) { // use j here so the thread index i is not clobbered
// test for blocking by forest polygons
const polygon *p = &forest_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);//sigin->d;
// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) continue;
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (!sigblk.dead || sigblk.ss > kdist) { //if marked as alive
//printf("kdist = %lld\n", kdist);
sigblk.x = px;
sigblk.y = py;
sigblk.ss = kdist;
sigblk.dead = true;
}
}
}
}
}
}
/*
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
printf("possible!\n");
printf("tdist = %lld ", tdist);
printf("sigblk.ss = %lld\n", sigblk.ss);
si->ss += sqrt((float)tdist);
break;
}
kill(si);
break;
}
if (possible) {
si->ss += sqrt((float)tdist);
break;
}
kill(si);
break;
*/
if (!sigref.dead) {
if (sigblk.dead) {
if (possible && tdist < sigref.ss && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
if (sigref.ss < sigblk.ss) {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
else {
kill(si);
break;
}
}
else {
if (possible && tdist < sigref.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
}
}
else {
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
kill(si);
break;
}
}
}
if (possible)
si->ss += sqrt((float)tdist);
else
kill(si);
break;
}
if (autoend == AUTO_END) {
kill(si);
}
}
////////////////// cuda time
signal *dev_signals;
node *dev_nodes;
polygon *dev_buildings, *dev_forests;
line *dev_gtoa;
void freeCudaMemory() {
hipFree(dev_signals);
hipFree(dev_nodes);
hipFree(dev_buildings);
hipFree(dev_forests);
hipFree(dev_gtoa);
}
hipError_t allocateCudaMemory() {
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = hipMalloc((void**)&dev_gtoa, sizeof(line));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_signals, N * sizeof(signal));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_nodes, nnum * sizeof(node));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_nodes, Nodes, nnum * sizeof(node), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_buildings, bnum * sizeof(polygon));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_buildings, Buildings, bnum * sizeof(polygon), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_forests, fnum * sizeof(polygon));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_forests, Forests, fnum * sizeof(polygon), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t signalCalcWithCuda()
{
clock_t tic = clock();
hipError_t cudaStatus;
long double r;
for (int i = 0; i < N; i++) {
signal *si = &sig[i];
r = d2r(360.0 * i / (long double)N);
si->x = ga.x1;
si->y = ga.y1;
si->vx = cosl(r) * RADSCALE;
si->vy = sinl(r) * RADSCALE;
si->ss = 0;
si->dead = false;
si->eid = -1;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_signals, &sig, N * sizeof(signal), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_gtoa, &ga, sizeof(line), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
signal_calculation << <M, N / M >> >(dev_signals, dev_nodes, dev_buildings, dev_forests, dev_gtoa);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "signal_calculation launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching signal_calculation!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(sig, dev_signals, N * sizeof(signal), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
clock_t toc = clock();
total_testing_time += toc - tic;
return cudaStatus;
}
void convertToCompass() {
int i, sidx;
double sum;
for (i = 0; i < NCOMPASS; i++) {
compass[i] = 0;
count[i] = 0; //initialzie
}
int deg;
for (i = 0; i < N; i++) {
deg = (int)(atan2(-sig[i].vy, -sig[i].vx) * 180 / PI);
if (deg < 0) deg += 360;
sidx = NCOMPASS * deg / 360;
if (!sig[i].dead) {
compass[sidx] = 1000000000 / sig[i].ss;
count[sidx] = 1;
}
//compass[hidx]
}
for (i = 0; i < NCOMPASS; i++) {
if(count[i] != 0)
compass[i] /= count[i];
}
}
void printOutput(){
fprintf(stdout, "[");
int i;
for (i = 0; i < NCOMPASS; i++) {
if (i == NCOMPASS - 1) {
fprintf(stdout, "%d", compass[i]);
}
else {
fprintf(stdout, "%d,", compass[i]);
}
}
fprintf(stdout, "]");
}
int main(int argc, char* argv[])
{
if (argc == 1) {
fprintf(stderr, "usage: ./prog mapx mapy x1 y1 x2 y2");
return -1;
}
int x, y;
my_t x1, y1, x2, y2;
mapx = atol(argv[1]);
mapy = atol(argv[2]);
x = atoi(argv[3]);
y = atoi(argv[4]);
y = height - y - 1;
x1 = 2 * (x - width*0.5) / width * ORTHO;
y1 = 2 * (y - height*0.5) / height * ORTHO;
x = atoi(argv[5]);
y = atoi(argv[6]);
y = height - y - 1;
x2 = 2 * (x - width*0.5) / width * ORTHO;
y2 = 2 * (y - height*0.5) / height * ORTHO;
initialize();
ga.bnum = bnum;
ga.fnum = fnum;
printf("[");
ga.x1 = x1;
ga.y1 = y1;
ga.x2 = x2;
ga.y2 = y2;
signalCalcWithCuda();
convertToCompass();
printOutput();
printf(",");
ga.x1 = x2;
ga.y1 = y2;
ga.x2 = x1;
ga.y2 = y1;
signalCalcWithCuda();
convertToCompass();
printOutput();
printf("]");
clean_up();
return 0;
}
void initialize() {
load_file();
allocateCudaMemory();
}
void load_file() {
int i, count;
FILE * fp;
char stmp[255];
char *pstr;
char *token;
char *next_ptr;
char *c;
int nidx, bidx, fidx;
int firstline = True;
int isname = True;
int ti;
int tokidx;
my_t mxx, mxy, mix, miy;
fp = fopen("_MapData.txt", "rt");
if (fp != NULL)
{
nidx = bidx = fidx = 0;
fscanf(fp, "i\t%d\t%d\t%d\t%lld\t%lld\n", &nnum, &bnum, &fnum, &mapx, &mapy);
Nodes = (node*)malloc(sizeof(node)*nnum);
Buildings = (polygon*)malloc(sizeof(polygon)*bnum);
Forests = (polygon*)malloc(sizeof(polygon)*fnum);
while (!feof(fp))
{
pstr = fgets(stmp, sizeof(stmp), fp);
if (pstr == NULL) break;
if (pstr[0] == 'n') {
double lat, lon;
sscanf(pstr, "n\t%lf\t%lf", &lat, &lon);
Nodes[nidx].x = (my_t)(lon*1e7 - mapx);
Nodes[nidx].y = (my_t)(lat*1e7 - mapy);
nidx++;
}
if (*pstr == 'b') {
count = 0; //except name tag
for (c = pstr+2; *c != '\0'; c++) {
if (*c == '\t') count++;
}
//Buildings[bidx].inodes = (int*)malloc(sizeof(int)*count);
Buildings[bidx].isize = count;
mxx = mxy = -99999;
mix = miy = 99999;
tokidx = 0;
isname = True;
/* get the first token */
token = strtok(pstr + 2, DELIM);
/* walk through other tokens */
while( token != NULL )
{
if (isname) {
isname = False;
token = strtok(NULL, DELIM);
continue;
}
sscanf(token, "%d", &ti);
Buildings[bidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok(NULL, DELIM);
tokidx++;
}
Buildings[bidx].x = (mxx + mix) / 2;
Buildings[bidx].y = (mxy + miy) / 2;
Buildings[bidx].radius = sqrtl((long double)((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy))) / 2;
bidx++;
}
if (*pstr == 'f') {
count = 0;
for (c = pstr+2; *c != '\0'; c++) {
if (*c == '\t') count++;
}
//Forests[fidx].inodes = (int*)malloc(sizeof(int)*count);
Forests[fidx].isize = count;
mxx = mxy = -99999;
mix = miy = 99999;
tokidx = 0;
isname = True;
/* get the first token */
token = strtok(pstr + 2, DELIM);
/* walk through other tokens */
while( token != NULL )
{
if (isname) {
isname = False;
token = strtok(NULL, DELIM);
continue;
}
sscanf(token, "%d", &ti);
Forests[fidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok(NULL, DELIM);
tokidx++;
}
Forests[fidx].x = (mxx + mix) / 2;
Forests[fidx].y = (mxy + miy) / 2;
Forests[fidx].radius = sqrtl((long double)((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy))) / 2;
fidx++;
}
}
fclose(fp);
}
else
{
//fprintf(stderr, "File closed\n");
//file not exist
}
}
void clean_up() {
int i;
free(Nodes);
free(Buildings);
free(Forests);
freeCudaMemory();
}
| f826abf6908ebea1509f0c7225c99f2f8d1e962b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "tools.h"
#include <time.h>
#define N 172032 //131072
#define M 672
#define THRESHOLD 100000
#define GSIZE 1000
#define PNTSIZE 300
#define AUTO_END 10
#define ORTHO 80000
#define RADIUS 1500
#define RADSCALE 1000000000
#define LINE_SIZE 0.3
#define NCOMPASS 360
#define d2r(deg) (deg * PI / 180.0)
#define kill(s) (s->dead = true)
#define PI 3.14159265358979323846
#define MAPX 1273623389
#define MAPY 363706170
#define True 1
#define False 0
#define DELIM "\t"
int nnum, bnum, fnum;
int point_mode = 0;
int width = 800, height = 800;
signal sig[N];
int compass[NCOMPASS];
int count[NCOMPASS];
int selection_mode = 0; //generator: 0, detector: 1
clock_t total_testing_time = 0;
node *Nodes;
polygon *Buildings;
polygon *Forests;
my_t mapx = 0; //1273623389;
my_t mapy = 0; //363706170;
line ga;
int toggle[10];
void load_file();
void clean_up();
void initialize();
__global__ void signal_calculation(signal *signal_list,
const node *node_list, const polygon *building_list, const polygon *forest_list, const line *gtoa) {
my_t gx = gtoa->x1;
my_t gy = gtoa->y1;
my_t ax = gtoa->x2;
my_t ay = gtoa->y2;
my_t zx, zy;
int i = threadIdx.x + (blockIdx.x * blockDim.x);
my_t px, py, test, tdist = 0, kdist = 0;
signal sigref, sigblk;
bool possible;
signal *si = &signal_list[i];
int autoend = -1;
while (!si->dead && ++autoend < AUTO_END) {
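// Each pass handles one ray segment: test for detection within RADIUS of the endpoint (ax, ay), find the nearest reflecting building edge and the nearest blocking forest edge, then decide which event happens first; AUTO_END caps the number of bounces.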
si->d = si->vy*si->x - si->vx*si->y;
// case of detection
possible = false;
my_t d = (-si->vy*ax + si->vx*ay + si->d) / RADSCALE;
if (-RADIUS <= d && d <= RADIUS) {
if (si->vx != 0) {
px = ax + (d*si->vy / RADSCALE);
test = (px - si->x) ^ si->vx;
}
else {
py = ay - (d*si->vx / RADSCALE);
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
possible = true;
zx = (si->x - ax);
zy = (si->y - ay);
tdist = zx*zx + zy*zy;
}
}
// reflection test
int n1, n2;
int j, k;
my_t test, kdist;
my_t lx1, ly1, lx2, ly2;
my_t Tnx, Tny, Td, pr;
sigref.dead = true;
int eid;
for (j = 0; j < gtoa->bnum; j++) {
// calculate reflection
const polygon *p = &building_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
eid = 100 * i + k;
if (si->eid == eid) continue;
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);
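// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))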
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) {
continue;
}
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (kdist < 10) continue;
if (sigref.dead || sigref.ss > kdist) { //if marked as alive
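// keep the nearest reflecting edge and mirror the direction about the edge normal n = (-(ly2-ly1), lx2-lx1): v' = v - 2*(v.n)*n / |n|^2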
my_t lnx = -(ly2 - ly1);
my_t lny = (lx2 - lx1);
my_t nv = lnx*si->vx + lny*si->vy;
sigref.x = px;
sigref.y = py;
sigref.vx = si->vx - 2 * nv * lnx / (lnx*lnx + lny*lny);
sigref.vy = si->vy - 2 * nv * lny / (lnx*lnx + lny*lny);
sigref.ss = kdist;
sigref.eid = eid;
sigref.dead = false;
}
}
}
}
}
}
// blocking test
sigblk.dead = false;
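// Note: sigblk.dead is used with the opposite meaning of its name - it is set to true once a blocking forest edge has been found.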
for (j = 0; j < gtoa->fnum; j++) { // use j here so the thread index i is not clobbered
// test for blocking by forest polygons
const polygon *p = &forest_list[j];
d = ((-si->vy)*p->x + (si->vx)*p->y + si->d) / RADSCALE;
pr = p->radius;
//possibly blocked if...
if (-pr <= d && d <= pr)
{
for (k = 0; k < p->isize - 1; k++)
{
n1 = p->inodes[k];
n2 = p->inodes[k + 1];
lx1 = node_list[n1].x;
ly1 = node_list[n1].y;
lx2 = node_list[n2].x;
ly2 = node_list[n2].y;
Tnx = -si->vy;
Tny = si->vx;
Td = -(-si->vy*si->x + si->vx*si->y);//sigin->d;
// p' = p1 + t(p2-p1), T(dot)p' = 0
// t = -(T(dot)p1) / (T(dot)(p2 - p1))
my_t tb = Tnx*(lx2 - lx1) + Tny*(ly2 - ly1);
if (tb == 0) { // parallel
continue;
}
my_t t = -(Tnx*lx1 + Tny*ly1 + Td);
if (t == 0 || t == tb) continue;
if ((0 < t && t < tb) || (tb < t && t < 0)) {
my_t px = lx1 + t*(lx2 - lx1) / tb;
my_t py = ly1 + t*(ly2 - ly1) / tb;
if (si->vx != 0) {
test = (px - si->x) ^ si->vx;
}
else {
test = (py - si->y) ^ si->vy;
}
if (test > 0) {
zx = (si->x - px);
zy = (si->y - py);
kdist = zx*zx + zy*zy;
if (!sigblk.dead || sigblk.ss > kdist) { //if marked as alive
//printf("kdist = %lld\n", kdist);
sigblk.x = px;
sigblk.y = py;
sigblk.ss = kdist;
sigblk.dead = true;
}
}
}
}
}
}
/*
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
printf("possible!\n");
printf("tdist = %lld ", tdist);
printf("sigblk.ss = %lld\n", sigblk.ss);
si->ss += sqrt((float)tdist);
break;
}
kill(si);
break;
}
if (possible) {
si->ss += sqrt((float)tdist);
break;
}
kill(si);
break;
*/
if (!sigref.dead) {
if (sigblk.dead) {
if (possible && tdist < sigref.ss && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
if (sigref.ss < sigblk.ss) {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
else {
kill(si);
break;
}
}
else {
if (possible && tdist < sigref.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
sigref.ss = sqrt(float(sigref.ss));
sigref.ss += si->ss;
*si = sigref;
continue;
}
}
}
else {
if (sigblk.dead) {
if (possible && tdist < sigblk.ss) {
si->ss += sqrt((float)tdist);
break;
}
else {
kill(si);
break;
}
}
}
if (possible)
si->ss += sqrt((float)tdist);
else
kill(si);
break;
}
if (autoend == AUTO_END) {
kill(si);
}
}
////////////////// cuda time
signal *dev_signals;
node *dev_nodes;
polygon *dev_buildings, *dev_forests;
line *dev_gtoa;
void freeCudaMemory() {
cudaFree(dev_signals);
cudaFree(dev_nodes);
cudaFree(dev_buildings);
cudaFree(dev_forests);
cudaFree(dev_gtoa);
}
cudaError_t allocateCudaMemory() {
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output).
cudaStatus = cudaMalloc((void**)&dev_gtoa, sizeof(line));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_signals, N * sizeof(signal));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_nodes, nnum * sizeof(node));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_nodes, Nodes, nnum * sizeof(node), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_buildings, bnum * sizeof(polygon));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_buildings, Buildings, bnum * sizeof(polygon), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_forests, fnum * sizeof(polygon));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_forests, Forests, fnum * sizeof(polygon), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t signalCalcWithCuda()
{
clock_t tic = clock();
cudaError_t cudaStatus;
long double r;
for (int i = 0; i < N; i++) {
signal *si = &sig[i];
r = d2r(360.0 * i / (long double)N);
si->x = ga.x1;
si->y = ga.y1;
si->vx = cosl(r) * RADSCALE;
si->vy = sinl(r) * RADSCALE;
si->ss = 0;
si->dead = false;
si->eid = -1;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_signals, &sig, N * sizeof(signal), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_gtoa, &ga, sizeof(line), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
signal_calculation << <M, N / M >> >(dev_signals, dev_nodes, dev_buildings, dev_forests, dev_gtoa);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "signal_calculation launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching signal_calculation!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(sig, dev_signals, N * sizeof(signal), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
clock_t toc = clock();
total_testing_time += toc - tic;
return cudaStatus;
}
void convertToCompass() {
int i, sidx;
double sum;
for (i = 0; i < NCOMPASS; i++) {
compass[i] = 0;
count[i] = 0; //initialzie
}
int deg;
for (i = 0; i < N; i++) {
deg = (int)(atan2(-sig[i].vy, -sig[i].vx) * 180 / PI);
if (deg < 0) deg += 360;
sidx = NCOMPASS * deg / 360;
if (!sig[i].dead) {
compass[sidx] = 1000000000 / sig[i].ss;
count[sidx] = 1;
}
//compass[hidx]
}
for (i = 0; i < NCOMPASS; i++) {
if(count[i] != 0)
compass[i] /= count[i];
}
}
void printOutput(){
fprintf(stdout, "[");
int i;
for (i = 0; i < NCOMPASS; i++) {
if (i == NCOMPASS - 1) {
fprintf(stdout, "%d", compass[i]);
}
else {
fprintf(stdout, "%d,", compass[i]);
}
}
fprintf(stdout, "]");
}
int main(int argc, char* argv[])
{
if (argc == 1) {
fprintf(stderr, "usage: ./prog mapx mapy x1 y1 x2 y2");
return -1;
}
int x, y;
my_t x1, y1, x2, y2;
mapx = atol(argv[1]);
mapy = atol(argv[2]);
x = atoi(argv[3]);
y = atoi(argv[4]);
y = height - y - 1;
x1 = 2 * (x - width*0.5) / width * ORTHO;
y1 = 2 * (y - height*0.5) / height * ORTHO;
x = atoi(argv[5]);
y = atoi(argv[6]);
y = height - y - 1;
x2 = 2 * (x - width*0.5) / width * ORTHO;
y2 = 2 * (y - height*0.5) / height * ORTHO;
initialize();
ga.bnum = bnum;
ga.fnum = fnum;
printf("[");
ga.x1 = x1;
ga.y1 = y1;
ga.x2 = x2;
ga.y2 = y2;
signalCalcWithCuda();
convertToCompass();
printOutput();
printf(",");
ga.x1 = x2;
ga.y1 = y2;
ga.x2 = x1;
ga.y2 = y1;
signalCalcWithCuda();
convertToCompass();
printOutput();
printf("]");
clean_up();
return 0;
}
void initialize() {
load_file();
allocateCudaMemory();
}
void load_file() {
int i, count;
FILE * fp;
char stmp[255];
char *pstr;
char *token;
char *next_ptr;
char *c;
int nidx, bidx, fidx;
int firstline = True;
int isname = True;
int ti;
int tokidx;
my_t mxx, mxy, mix, miy;
fp = fopen("_MapData.txt", "rt");
if (fp != NULL)
{
nidx = bidx = fidx = 0;
fscanf(fp, "i\t%d\t%d\t%d\t%lld\t%lld\n", &nnum, &bnum, &fnum, &mapx, &mapy);
Nodes = (node*)malloc(sizeof(node)*nnum);
Buildings = (polygon*)malloc(sizeof(polygon)*bnum);
Forests = (polygon*)malloc(sizeof(polygon)*fnum);
while (!feof(fp))
{
pstr = fgets(stmp, sizeof(stmp), fp);
if (pstr == NULL) break;
if (pstr[0] == 'n') {
double lat, lon;
sscanf(pstr, "n\t%lf\t%lf", &lat, &lon);
Nodes[nidx].x = (my_t)(lon*1e7 - mapx);
Nodes[nidx].y = (my_t)(lat*1e7 - mapy);
nidx++;
}
if (*pstr == 'b') {
count = 0; //except name tag
for (c = pstr+2; *c != '\0'; c++) {
if (*c == '\t') count++;
}
//Buildings[bidx].inodes = (int*)malloc(sizeof(int)*count);
Buildings[bidx].isize = count;
mxx = mxy = -99999;
mix = miy = 99999;
tokidx = 0;
isname = True;
/* get the first token */
token = strtok(pstr + 2, DELIM);
/* walk through other tokens */
while( token != NULL )
{
if (isname) {
isname = False;
token = strtok(NULL, DELIM);
continue;
}
sscanf(token, "%d", &ti);
Buildings[bidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok(NULL, DELIM);
tokidx++;
}
Buildings[bidx].x = (mxx + mix) / 2;
Buildings[bidx].y = (mxy + miy) / 2;
Buildings[bidx].radius = sqrtl((long double)((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy))) / 2;
bidx++;
}
if (*pstr == 'f') {
count = 0;
for (c = pstr+2; *c != '\0'; c++) {
if (*c == '\t') count++;
}
//Forests[fidx].inodes = (int*)malloc(sizeof(int)*count);
Forests[fidx].isize = count;
mxx = mxy = -99999;
mix = miy = 99999;
tokidx = 0;
isname = True;
/* get the first token */
token = strtok(pstr + 2, DELIM);
/* walk through other tokens */
while( token != NULL )
{
if (isname) {
isname = False;
token = strtok(NULL, DELIM);
continue;
}
sscanf(token, "%d", &ti);
Forests[fidx].inodes[tokidx] = ti;
if (mxx < Nodes[ti].x)
mxx = Nodes[ti].x;
if (mxy < Nodes[ti].y)
mxy = Nodes[ti].y;
if (mix > Nodes[ti].x)
mix = Nodes[ti].x;
if (miy > Nodes[ti].y)
miy = Nodes[ti].y;
token = strtok(NULL, DELIM);
tokidx++;
}
Forests[fidx].x = (mxx + mix) / 2;
Forests[fidx].y = (mxy + miy) / 2;
Forests[fidx].radius = sqrtl((long double)((mxx - mix)*(mxx - mix) + (mxy - miy)*(mxy - miy))) / 2;
fidx++;
}
}
fclose(fp);
}
else
{
//fprintf(stderr, "File closed\n");
//file not exist
}
}
void clean_up() {
int i;
free(Nodes);
free(Buildings);
free(Forests);
freeCudaMemory();
}
|
db2608b59af9d16936ef8937df7535be4579d2d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
#include <stdio.h>
#include <stdlib.h>
// GPU kernel function to add two vectors
__global__ void add_gpu( int *a, int *b, int *c, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] + b[index];
}
// CPU function to add two vectors
void add_cpu (int *a, int *b, int *c, int n) {
for (int i=0; i < n; i++)
c[i] = a[i] + b[i];
}
// CPU function to generate a vector of random integers
void random_ints (int *a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
// CPU function to compare two vectors
int compare_ints( int *a, int *b, int n ){
int pass = 0;
for (int i = 0; i < n; i++){
if (a[i] != b[i]) {
printf("Value mismatch at location %d, values %d and %d\n",i, a[i], b[i]);
pass = 1;
}
}
if (pass == 0) printf ("Test passed\n"); else printf ("Test Failed\n");
return pass;
}
int main( void ) {
int *a, *b, *c; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N integers
// printf("N = %d\n", N);
// Allocate GPU/device copies of dev_a, dev_b, dev_c
hipMalloc( (void**)&dev_a, size );
hipMalloc( (void**)&dev_b, size );
hipMalloc( (void**)&dev_c, size );
// Allocate CPU/host copies of a, b, c
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
// Fill input vectors with random integer numbers
random_ints( a, N );
random_ints( b, N );
/* printf("a = %d\n", a[2048]);
printf("a = %d\n", a[2]);
printf("b = %d\n", b[1]);
printf("b = %d\n", b[2]);
*/
// copy inputs to device
hipMemcpy( dev_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, size, hipMemcpyHostToDevice );
// launch add_gpu() kernel with blocks and threads
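// N (2048*2048) is an exact multiple of THREADS_PER_BLOCK (512), so N/THREADS_PER_BLOCK blocks cover every element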
hipLaunchKernelGGL(( add_gpu), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, dev_a, dev_b, dev_c, N );
// copy device result back to host copy of c
hipMemcpy( c, dev_c, size, hipMemcpyDeviceToHost );
printf("c_gpu %d\n", *c);
//Check the results with CPU implementation
int *c_h; c_h = (int*)malloc( size );
add_cpu (a, b, c_h, N);
printf("c_host %d\n", *c_h);
compare_ints(c, c_h, N);
// Clean CPU memory allocations
free( a ); free( b ); free( c ); free (c_h);
// Clean GPU memory allocations
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
return 0;
} | db2608b59af9d16936ef8937df7535be4579d2d5.cu | #define N (2048*2048)
#define THREADS_PER_BLOCK 512
#include <stdio.h>
#include <stdlib.h>
// GPU kernel function to add two vectors
__global__ void add_gpu( int *a, int *b, int *c, int n){
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n)
c[index] = a[index] + b[index];
}
// CPU function to add two vectors
void add_cpu (int *a, int *b, int *c, int n) {
for (int i=0; i < n; i++)
c[i] = a[i] + b[i];
}
// CPU function to generate a vector of random integers
void random_ints (int *a, int n) {
for (int i = 0; i < n; i++)
a[i] = rand() % 10000; // random number between 0 and 9999
}
// CPU function to compare two vectors
int compare_ints( int *a, int *b, int n ){
int pass = 0;
for (int i = 0; i < n; i++){
if (a[i] != b[i]) {
printf("Value mismatch at location %d, values %d and %d\n",i, a[i], b[i]);
pass = 1;
}
}
if (pass == 0) printf ("Test passed\n"); else printf ("Test Failed\n");
return pass;
}
int main( void ) {
int *a, *b, *c; // host copies of a, b, c
int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
int size = N * sizeof( int ); // we need space for N integers
// printf("N = %d\n", N);
// Allocate GPU/device copies of dev_a, dev_b, dev_c
cudaMalloc( (void**)&dev_a, size );
cudaMalloc( (void**)&dev_b, size );
cudaMalloc( (void**)&dev_c, size );
// Allocate CPU/host copies of a, b, c
a = (int*)malloc( size );
b = (int*)malloc( size );
c = (int*)malloc( size );
// Fill input vectors with random integer numbers
random_ints( a, N );
random_ints( b, N );
/* printf("a = %d\n", a[2048]);
printf("a = %d\n", a[2]);
printf("b = %d\n", b[1]);
printf("b = %d\n", b[2]);
*/
// copy inputs to device
cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );
// launch add_gpu() kernel with blocks and threads
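// N (2048*2048) is an exact multiple of THREADS_PER_BLOCK (512), so N/THREADS_PER_BLOCK blocks cover every element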
add_gpu<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c, N );
// copy device result back to host copy of c
cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
printf("c_gpu %d\n", *c);
//Check the results with CPU implementation
int *c_h; c_h = (int*)malloc( size );
add_cpu (a, b, c_h, N);
printf("c_host %d\n", *c_h);
compare_ints(c, c_h, N);
// Clean CPU memory allocations
free( a ); free( b ); free( c ); free (c_h);
// Clean GPU memory allocations
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
return 0;
} |
81aa6dd59cf38e01dce61a12badc5cb901969c8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void logical_and_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
| 81aa6dd59cf38e01dce61a12badc5cb901969c8c.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void logical_and_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_and_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a && b;
});
});
}
void logical_or_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_or_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return a || b;
});
});
}
void logical_xor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, ScalarType::BFloat16, iter.common_dtype(), "logical_xor_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool {
return bool(a) != bool(b);
});
});
}
REGISTER_DISPATCH(logical_and_stub, &logical_and_kernel_cuda);
REGISTER_DISPATCH(logical_or_stub, &logical_or_kernel_cuda);
REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda);
}} // namespace at::native
|
7a948c2e81c3c3c642a25f4586badbda42daf591.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__device__ __host__ int
maximum( int a,
int b,
int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void
needle_cuda_shared_1( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
__syncthreads();
if (tx < 32)
temp[0][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
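// Forward pass over the anti-diagonals of the tile: on diagonal m, thread tx updates temp[m - tx + 1][tx + 1] from its north-west neighbour plus the ref score and its north/west neighbours minus the gap penalty (Needleman-Wunsch recurrence).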
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
__global__ void
needle_cuda_shared_2( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i ;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
if (tx < 32)
temp[0][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
| 7a948c2e81c3c3c642a25f4586badbda42daf591.cu |
#include "needle.h"
#include <stdio.h>
#define SDATA( index) CUT_BANK_CHECKER(sdata, index)
__device__ __host__ int
maximum( int a,
int b,
int c){
int k;
if( a <= b )
k = b;
else
k = a;
if( k <=c )
return(c);
else
return(k);
}
__global__ void
needle_cuda_shared_1( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx;
int b_index_y = i - 1 - bx;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
__syncthreads();
if (tx < 32)
temp[0][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
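// Forward pass over the anti-diagonals of the tile: on diagonal m, thread tx updates temp[m - tx + 1][tx + 1] from its north-west neighbour plus the ref score and its north/west neighbours minus the gap penalty (Needleman-Wunsch recurrence).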
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
__global__ void
needle_cuda_shared_2( int* referrence,
int* matrix_cuda,
int cols,
int penalty,
int i,
int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int b_index_x = bx + block_width - i ;
int b_index_y = block_width - bx -1;
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( cols + 1 );
int index_n = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 );
int index_w = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols );
int index_nw = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x;
__shared__ int temp[BLOCK_SIZE+1][BLOCK_SIZE+1];
__shared__ int ref[BLOCK_SIZE][BLOCK_SIZE];
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
ref[ty][tx] = referrence[index + cols * ty];
if (tx < 32)
temp[0][0] = matrix_cuda[index_nw];
temp[tx + 1][0] = matrix_cuda[index_w + cols * tx];
temp[0][tx + 1] = matrix_cuda[index_n];
__syncthreads();
for( int m = 0 ; m < BLOCK_SIZE ; m++){
if ( tx <= m ){
int t_index_x = tx + 1;
int t_index_y = m - tx + 1;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m ;
int t_index_y = BLOCK_SIZE - tx;
temp[t_index_y][t_index_x] = maximum( temp[t_index_y-1][t_index_x-1] + ref[t_index_y-1][t_index_x-1],
temp[t_index_y][t_index_x-1] - penalty,
temp[t_index_y-1][t_index_x] - penalty);
}
__syncthreads();
}
for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++)
matrix_cuda[index + ty * cols] = temp[ty+1][tx+1];
}
|
17a08badfdf143042496a741a15ce1d73c67386e.hip | // !!! This is a file automatically generated by hipify!!!
#include <fast_gicp/cuda/gaussian_voxelmap.cuh>
#include <fast_gicp/cuda/vector3_hash.cuh>
namespace fast_gicp {
namespace cuda {
// point coord -> voxel coord conversion
struct voxel_coord_kernel {
voxel_coord_kernel(const thrust::device_ptr<const VoxelMapInfo>& info) : voxelmap_info_ptr(info) {}
__host__ __device__ Eigen::Vector3i operator()(const Eigen::Vector3f& x) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
return calc_voxel_coord(x, info.voxel_resolution);
}
const thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
};
// assign voxel indices to buckets
struct voxel_bucket_assignment_kernel {
voxel_bucket_assignment_kernel(
const thrust::device_ptr<const VoxelMapInfo>& voxelmap_info,
const thrust::device_vector<Eigen::Vector3i>& point_coords,
thrust::device_vector<thrust::pair<int, int>>& index_buckets,
thrust::device_vector<int>& voxels_failures)
: voxelmap_info_ptr(voxelmap_info),
point_coords_ptr(point_coords.data()),
index_buckets_ptr(index_buckets.data()),
voxels_failures_ptr(voxels_failures.data()) {}
__device__ void operator()(int point_index) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const Eigen::Vector3i* coords = thrust::raw_pointer_cast(point_coords_ptr);
uint64_t hash = vector3i_hash(coords[point_index]);
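// Linear probing: atomicCAS claims an empty bucket for this voxel coord; if the slot already holds the same coord the voxel is registered, otherwise move on to the next slot.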
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
thrust::pair<int, int>* index_bucket = thrust::raw_pointer_cast(index_buckets_ptr) + bucket_index;
int old = atomicCAS(&index_bucket->first, -1, point_index);
if (old < 0) {
index_bucket->second = atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr), 1);
return;
}
if (equal(coords[point_index], coords[old])) {
return;
}
}
atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr) + 1, 1);
}
thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr;
thrust::device_ptr<thrust::pair<int, int>> index_buckets_ptr;
thrust::device_ptr<int> voxels_failures_ptr;
};
// pair<point index, bucket index> to pair<voxel coord, bucket index>
struct voxel_coord_select_kernel {
voxel_coord_select_kernel(const thrust::device_vector<Eigen::Vector3i>& point_coords) : point_coords_ptr(point_coords.data()) {}
__device__ thrust::pair<Eigen::Vector3i, int> operator()(const thrust::pair<int, int>& index_bucket) const {
if (index_bucket.first < 0) {
return thrust::make_pair(Eigen::Vector3i(0, 0, 0), -1);
}
return thrust::make_pair(thrust::raw_pointer_cast(point_coords_ptr)[index_bucket.first], index_bucket.second);
}
thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr;
};
// accumulate points and covs
struct accumulate_points_kernel {
accumulate_points_kernel(
const thrust::device_ptr<VoxelMapInfo>& voxelmap_info_ptr,
const thrust::device_vector<thrust::pair<Eigen::Vector3i, int>>& buckets,
thrust::device_vector<int>& num_points,
thrust::device_vector<Eigen::Vector3f>& voxel_means,
thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: voxelmap_info_ptr(voxelmap_info_ptr),
buckets_ptr(buckets.data()),
num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__device__ void operator()(const thrust::tuple<Eigen::Vector3f, Eigen::Matrix3f>& input) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const auto& mean = thrust::get<0>(input);
const auto& cov = thrust::get<1>(input);
const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution);
uint64_t hash = vector3i_hash(coord);
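// Probe the bucket table for this point's voxel and atomically accumulate the point count, the sum of means and the sum of covariances into the matched voxel.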
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index];
if (equal(bucket.first, coord)) {
if (bucket.second < 0) {
break;
}
int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second];
Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second];
Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second];
atomicAdd(&num_points, 1);
for (int j = 0; j < 3; j++) {
atomicAdd(voxel_mean.data() + j, mean[j]);
}
for (int j = 0; j < 9; j++) {
atomicAdd(voxel_cov.data() + j, cov.data()[j]);
}
}
}
}
__device__ void operator()(const Eigen::Vector3f& mean) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution);
uint64_t hash = vector3i_hash(coord);
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index];
if (equal(bucket.first, coord)) {
int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second];
Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second];
Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second];
Eigen::Matrix3f cov = mean * mean.transpose();
atomicAdd(&num_points, 1);
for (int j = 0; j < 3; j++) {
atomicAdd(voxel_mean.data() + j, mean[j]);
}
for (int j = 0; j < 9; j++) {
atomicAdd(voxel_cov.data() + j, cov.data()[j]);
}
}
}
}
thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
thrust::device_ptr<const thrust::pair<Eigen::Vector3i, int>> buckets_ptr;
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
struct finalize_voxels_kernel {
finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__host__ __device__ void operator()(int i) const {
int num_points = thrust::raw_pointer_cast(num_points_ptr)[i];
auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i];
auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i];
voxel_mean /= num_points;
voxel_covs /= num_points;
}
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
struct ndt_finalize_voxels_kernel {
ndt_finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__host__ __device__ void operator()(int i) const {
int num_points = thrust::raw_pointer_cast(num_points_ptr)[i];
auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i];
auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i];
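// voxel_mean holds sum(x) and voxel_covs holds sum(x*x^T); convert to sample mean and covariance: cov = (sum(x x^T) - mean * sum(x)^T) / N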
Eigen::Vector3f sum_pts = voxel_mean;
voxel_mean /= num_points;
voxel_covs = (voxel_covs - voxel_mean * sum_pts.transpose()) / num_points;
}
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
GaussianVoxelMap::GaussianVoxelMap(float resolution, int init_num_buckets, int max_bucket_scan_count) : init_num_buckets(init_num_buckets) {
voxelmap_info.num_voxels = 0;
voxelmap_info.num_buckets = init_num_buckets;
voxelmap_info.max_bucket_scan_count = max_bucket_scan_count;
voxelmap_info.voxel_resolution = resolution;
voxelmap_info_ptr.resize(1);
voxelmap_info_ptr[0] = voxelmap_info;
}
void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points) {
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
create_bucket_table(stream, points);
num_points.resize(voxelmap_info.num_voxels);
voxel_means.resize(voxelmap_info.num_voxels);
voxel_covs.resize(voxelmap_info.num_voxels);
num_points.resize(voxelmap_info.num_voxels);
voxel_means.resize(voxelmap_info.num_voxels);
voxel_covs.resize(voxelmap_info.num_voxels);
thrust::fill(thrust::hip::par.on(stream), num_points.begin(), num_points.end(), 0);
thrust::fill(thrust::hip::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
thrust::fill(thrust::hip::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());
thrust::for_each(thrust::hip::par.on(stream), points.begin(), points.end(), accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
thrust::for_each(thrust::hip::par.on(stream), thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), ndt_finalize_voxels_kernel(num_points, voxel_means, voxel_covs));
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
}
void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points, const thrust::device_vector<Eigen::Matrix3f>& covariances) {
hipStream_t stream;
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
create_bucket_table(stream, points);
num_points.resize(voxelmap_info.num_voxels);
voxel_means.resize(voxelmap_info.num_voxels);
voxel_covs.resize(voxelmap_info.num_voxels);
thrust::fill(thrust::hip::par.on(stream), num_points.begin(), num_points.end(), 0);
thrust::fill(thrust::hip::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
thrust::fill(thrust::hip::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());
thrust::for_each(
thrust::hip::par.on(stream),
thrust::make_zip_iterator(thrust::make_tuple(points.begin(), covariances.begin())),
thrust::make_zip_iterator(thrust::make_tuple(points.end(), covariances.end())),
accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
thrust::for_each(thrust::hip::par.on(stream), thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), finalize_voxels_kernel(num_points, voxel_means, voxel_covs));
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
}
void GaussianVoxelMap::create_bucket_table(hipStream_t stream, const thrust::device_vector<Eigen::Vector3f>& points) {
thrust::device_vector<Eigen::Vector3i> coords(points.size());
thrust::transform(thrust::hip::par.on(stream), points.begin(), points.end(), coords.begin(), voxel_coord_kernel(voxelmap_info_ptr.data()));
thrust::device_vector<thrust::pair<int, int>> index_buckets;
thrust::device_vector<int> voxels_failures(2, 0);
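// Double the bucket count until fewer than 1% of the points fail to find a slot within max_bucket_scan_count probes; voxels_failures[0] counts voxels created, voxels_failures[1] counts failures.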
for (int num_buckets = init_num_buckets; ; num_buckets *= 2) { // exits via the break below once the failure rate drops under 1%
voxelmap_info.num_buckets = num_buckets;
voxelmap_info_ptr[0] = voxelmap_info;
index_buckets.resize(num_buckets);
thrust::fill(thrust::hip::par.on(stream), index_buckets.begin(), index_buckets.end(), thrust::make_pair(-1, -1));
thrust::fill(thrust::hip::par.on(stream), voxels_failures.begin(), voxels_failures.end(), 0);
thrust::for_each(
thrust::hip::par.on(stream),
thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(points.size()),
voxel_bucket_assignment_kernel(voxelmap_info_ptr.data(), coords, index_buckets, voxels_failures));
thrust::host_vector<int> h_voxels_failures = voxels_failures;
if (static_cast<double>(h_voxels_failures[1]) / points.size() < 0.01) {
voxelmap_info.num_voxels = h_voxels_failures[0];
voxelmap_info_ptr[0] = voxelmap_info;
break;
}
}
buckets.resize(index_buckets.size());
thrust::transform(thrust::hip::par.on(stream), index_buckets.begin(), index_buckets.end(), buckets.begin(), voxel_coord_select_kernel(coords));
}
} // namespace cuda
} // namespace fast_gicp
| 17a08badfdf143042496a741a15ce1d73c67386e.cu | #include <fast_gicp/cuda/gaussian_voxelmap.cuh>
#include <fast_gicp/cuda/vector3_hash.cuh>
namespace fast_gicp {
namespace cuda {
// point coord -> voxel coord conversion
struct voxel_coord_kernel {
voxel_coord_kernel(const thrust::device_ptr<const VoxelMapInfo>& info) : voxelmap_info_ptr(info) {}
__host__ __device__ Eigen::Vector3i operator()(const Eigen::Vector3f& x) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
return calc_voxel_coord(x, info.voxel_resolution);
}
const thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
};
// assign voxel indices to buckets
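// Open-addressing insert with linear probing: a point hashes its voxel coordinate and
// scans up to max_bucket_scan_count consecutive buckets. An empty bucket is claimed via
// atomicCAS and receives a fresh voxel index from voxels_failures[0]; a bucket that
// already holds the same coordinate is reused; if the scan is exhausted, the failure
// counter voxels_failures[1] is incremented.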
struct voxel_bucket_assignment_kernel {
voxel_bucket_assignment_kernel(
const thrust::device_ptr<const VoxelMapInfo>& voxelmap_info,
const thrust::device_vector<Eigen::Vector3i>& point_coords,
thrust::device_vector<thrust::pair<int, int>>& index_buckets,
thrust::device_vector<int>& voxels_failures)
: voxelmap_info_ptr(voxelmap_info),
point_coords_ptr(point_coords.data()),
index_buckets_ptr(index_buckets.data()),
voxels_failures_ptr(voxels_failures.data()) {}
__device__ void operator()(int point_index) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const Eigen::Vector3i* coords = thrust::raw_pointer_cast(point_coords_ptr);
uint64_t hash = vector3i_hash(coords[point_index]);
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
thrust::pair<int, int>* index_bucket = thrust::raw_pointer_cast(index_buckets_ptr) + bucket_index;
int old = atomicCAS(&index_bucket->first, -1, point_index);
if (old < 0) {
index_bucket->second = atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr), 1);
return;
}
if (equal(coords[point_index], coords[old])) {
return;
}
}
atomicAdd(thrust::raw_pointer_cast(voxels_failures_ptr) + 1, 1);
}
thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr;
thrust::device_ptr<thrust::pair<int, int>> index_buckets_ptr;
thrust::device_ptr<int> voxels_failures_ptr;
};
// pair<point index, bucket index> to pair<voxel coord, bucket index>
struct voxel_coord_select_kernel {
voxel_coord_select_kernel(const thrust::device_vector<Eigen::Vector3i>& point_coords) : point_coords_ptr(point_coords.data()) {}
__device__ thrust::pair<Eigen::Vector3i, int> operator()(const thrust::pair<int, int>& index_bucket) const {
if (index_bucket.first < 0) {
return thrust::make_pair(Eigen::Vector3i(0, 0, 0), -1);
}
return thrust::make_pair(thrust::raw_pointer_cast(point_coords_ptr)[index_bucket.first], index_bucket.second);
}
thrust::device_ptr<const Eigen::Vector3i> point_coords_ptr;
};
// accumulate points and covs
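// Scatter pass: every input (a point, or a point/covariance pair) locates its voxel with
// the same hash probe and atomically accumulates the point count, the sum of points and
// the sum of outer products (or covariances). The finalize kernels below convert these
// sums into per-voxel means and covariances.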
struct accumulate_points_kernel {
accumulate_points_kernel(
const thrust::device_ptr<VoxelMapInfo>& voxelmap_info_ptr,
const thrust::device_vector<thrust::pair<Eigen::Vector3i, int>>& buckets,
thrust::device_vector<int>& num_points,
thrust::device_vector<Eigen::Vector3f>& voxel_means,
thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: voxelmap_info_ptr(voxelmap_info_ptr),
buckets_ptr(buckets.data()),
num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__device__ void operator()(const thrust::tuple<Eigen::Vector3f, Eigen::Matrix3f>& input) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const auto& mean = thrust::get<0>(input);
const auto& cov = thrust::get<1>(input);
const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution);
uint64_t hash = vector3i_hash(coord);
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index];
if (equal(bucket.first, coord)) {
if (bucket.second < 0) {
break;
}
int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second];
Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second];
Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second];
atomicAdd(&num_points, 1);
for (int j = 0; j < 3; j++) {
atomicAdd(voxel_mean.data() + j, mean[j]);
}
for (int j = 0; j < 9; j++) {
atomicAdd(voxel_cov.data() + j, cov.data()[j]);
}
}
}
}
__device__ void operator()(const Eigen::Vector3f& mean) const {
const auto& info = *thrust::raw_pointer_cast(voxelmap_info_ptr);
const Eigen::Vector3i coord = calc_voxel_coord(mean, info.voxel_resolution);
uint64_t hash = vector3i_hash(coord);
for (int i = 0; i < info.max_bucket_scan_count; i++) {
uint64_t bucket_index = (hash + i) % info.num_buckets;
const thrust::pair<Eigen::Vector3i, int>& bucket = thrust::raw_pointer_cast(buckets_ptr)[bucket_index];
if (equal(bucket.first, coord)) {
int& num_points = thrust::raw_pointer_cast(num_points_ptr)[bucket.second];
Eigen::Vector3f& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[bucket.second];
Eigen::Matrix3f& voxel_cov = thrust::raw_pointer_cast(voxel_covs_ptr)[bucket.second];
Eigen::Matrix3f cov = mean * mean.transpose();
atomicAdd(&num_points, 1);
for (int j = 0; j < 3; j++) {
atomicAdd(voxel_mean.data() + j, mean[j]);
}
for (int j = 0; j < 9; j++) {
atomicAdd(voxel_cov.data() + j, cov.data()[j]);
}
}
}
}
thrust::device_ptr<const VoxelMapInfo> voxelmap_info_ptr;
thrust::device_ptr<const thrust::pair<Eigen::Vector3i, int>> buckets_ptr;
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
struct finalize_voxels_kernel {
finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__host__ __device__ void operator()(int i) const {
int num_points = thrust::raw_pointer_cast(num_points_ptr)[i];
auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i];
auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i];
voxel_mean /= num_points;
voxel_covs /= num_points;
}
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
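// NDT-style finalization: at this point voxel_means holds sum(x) and voxel_covs holds
// sum(x * x^T), so the covariance is recovered as
//   cov = (sum(x x^T) - mean * sum(x)^T) / N = E[x x^T] - mean * mean^T.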
struct ndt_finalize_voxels_kernel {
ndt_finalize_voxels_kernel(thrust::device_vector<int>& num_points, thrust::device_vector<Eigen::Vector3f>& voxel_means, thrust::device_vector<Eigen::Matrix3f>& voxel_covs)
: num_points_ptr(num_points.data()),
voxel_means_ptr(voxel_means.data()),
voxel_covs_ptr(voxel_covs.data()) {}
__host__ __device__ void operator()(int i) const {
int num_points = thrust::raw_pointer_cast(num_points_ptr)[i];
auto& voxel_mean = thrust::raw_pointer_cast(voxel_means_ptr)[i];
auto& voxel_covs = thrust::raw_pointer_cast(voxel_covs_ptr)[i];
Eigen::Vector3f sum_pts = voxel_mean;
voxel_mean /= num_points;
voxel_covs = (voxel_covs - voxel_mean * sum_pts.transpose()) / num_points;
}
thrust::device_ptr<int> num_points_ptr;
thrust::device_ptr<Eigen::Vector3f> voxel_means_ptr;
thrust::device_ptr<Eigen::Matrix3f> voxel_covs_ptr;
};
GaussianVoxelMap::GaussianVoxelMap(float resolution, int init_num_buckets, int max_bucket_scan_count) : init_num_buckets(init_num_buckets) {
voxelmap_info.num_voxels = 0;
voxelmap_info.num_buckets = init_num_buckets;
voxelmap_info.max_bucket_scan_count = max_bucket_scan_count;
voxelmap_info.voxel_resolution = resolution;
voxelmap_info_ptr.resize(1);
voxelmap_info_ptr[0] = voxelmap_info;
}
void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points) {
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
create_bucket_table(stream, points);
num_points.resize(voxelmap_info.num_voxels);
voxel_means.resize(voxelmap_info.num_voxels);
voxel_covs.resize(voxelmap_info.num_voxels);
thrust::fill(thrust::cuda::par.on(stream), num_points.begin(), num_points.end(), 0);
thrust::fill(thrust::cuda::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
thrust::fill(thrust::cuda::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());
thrust::for_each(thrust::cuda::par.on(stream), points.begin(), points.end(), accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
thrust::for_each(thrust::cuda::par.on(stream), thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), ndt_finalize_voxels_kernel(num_points, voxel_means, voxel_covs));
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
}
void GaussianVoxelMap::create_voxelmap(const thrust::device_vector<Eigen::Vector3f>& points, const thrust::device_vector<Eigen::Matrix3f>& covariances) {
cudaStream_t stream;
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
create_bucket_table(stream, points);
num_points.resize(voxelmap_info.num_voxels);
voxel_means.resize(voxelmap_info.num_voxels);
voxel_covs.resize(voxelmap_info.num_voxels);
thrust::fill(thrust::cuda::par.on(stream), num_points.begin(), num_points.end(), 0);
thrust::fill(thrust::cuda::par.on(stream), voxel_means.begin(), voxel_means.end(), Eigen::Vector3f::Zero().eval());
thrust::fill(thrust::cuda::par.on(stream), voxel_covs.begin(), voxel_covs.end(), Eigen::Matrix3f::Zero().eval());
thrust::for_each(
thrust::cuda::par.on(stream),
thrust::make_zip_iterator(thrust::make_tuple(points.begin(), covariances.begin())),
thrust::make_zip_iterator(thrust::make_tuple(points.end(), covariances.end())),
accumulate_points_kernel(voxelmap_info_ptr.data(), buckets, num_points, voxel_means, voxel_covs));
thrust::for_each(thrust::cuda::par.on(stream), thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(voxelmap_info.num_voxels), finalize_voxels_kernel(num_points, voxel_means, voxel_covs));
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
}
void GaussianVoxelMap::create_bucket_table(cudaStream_t stream, const thrust::device_vector<Eigen::Vector3f>& points) {
thrust::device_vector<Eigen::Vector3i> coords(points.size());
thrust::transform(thrust::cuda::par.on(stream), points.begin(), points.end(), coords.begin(), voxel_coord_kernel(voxelmap_info_ptr.data()));
thrust::device_vector<thrust::pair<int, int>> index_buckets;
thrust::device_vector<int> voxels_failures(2, 0);
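// Bucket-table sizing: retry with twice as many buckets until fewer than 1% of the
// points fail to get a slot. Since `init_num_buckets * 4` never evaluates to false,
// termination relies entirely on the break below.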
for (int num_buckets = init_num_buckets; init_num_buckets * 4; num_buckets *= 2) {
voxelmap_info.num_buckets = num_buckets;
voxelmap_info_ptr[0] = voxelmap_info;
index_buckets.resize(num_buckets);
thrust::fill(thrust::cuda::par.on(stream), index_buckets.begin(), index_buckets.end(), thrust::make_pair(-1, -1));
thrust::fill(thrust::cuda::par.on(stream), voxels_failures.begin(), voxels_failures.end(), 0);
thrust::for_each(
thrust::cuda::par.on(stream),
thrust::counting_iterator<int>(0),
thrust::counting_iterator<int>(points.size()),
voxel_bucket_assignment_kernel(voxelmap_info_ptr.data(), coords, index_buckets, voxels_failures));
thrust::host_vector<int> h_voxels_failures = voxels_failures;
if (static_cast<double>(h_voxels_failures[1]) / points.size() < 0.01) {
voxelmap_info.num_voxels = h_voxels_failures[0];
voxelmap_info_ptr[0] = voxelmap_info;
break;
}
}
buckets.resize(index_buckets.size());
thrust::transform(thrust::cuda::par.on(stream), index_buckets.begin(), index_buckets.end(), buckets.begin(), voxel_coord_select_kernel(coords));
}
} // namespace cuda
} // namespace fast_gicp
|
02825a3c9d01d6235ceb251b6aa8c1e9113c59e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./local_share_forward.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <int unroll_ci_, int unroll_co_, int unroll_n_>
struct UnrollConfig {
static int const unroll_ci = unroll_ci_;
static int const unroll_co = unroll_co_;
static int const unroll_n = unroll_n_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
static int const nr_threads = nr_thread_x * nr_thread_y;
};
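// Tiling layout implied by the counts below: one thread block computes a
// FilterTileCount::tile_co x DataTileCount::tile_batch output tile for a single output
// spatial position, sweeping the input channels unroll_ci at a time. Shared memory holds
// one unroll_ci-deep slice of the data (tile_batch wide) and of the filter (tile_co
// wide); the filter tile pads its stride by one float, presumably to avoid shared-memory
// bank conflicts.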
template <typename UnrollConfig_, typename ThreadConfig_>
struct DataTileCount {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
static int const tile_batch = UnrollConfig::unroll_n * ThreadConfig::nr_thread_x;
static int const load_x = tile_batch > 32 ? 32 : tile_batch;
static int const load_y = ThreadConfig::nr_threads / load_x;
static int const smem_h = UnrollConfig::unroll_ci;
static int const smem_w = tile_batch;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
static int const reg_row = (smem_h + load_y - 1) / load_y;
static int const reg_col = (smem_w + load_x - 1) / load_x;
static bool const check_sh_bounds = smem_w % load_x != 0;
};
template <typename UnrollConfig_, typename ThreadConfig_>
struct FilterTileCount {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
static int const tile_co = ThreadConfig::nr_thread_y * UnrollConfig::unroll_co;
static int const smem_h = UnrollConfig::unroll_ci;
static int const smem_w = tile_co;
static int const smem_stride = smem_w + 1;
static int const smem_tot = smem_h * smem_stride;
static int const load_x = tile_co > 32 ? 32 : tile_co;
static int const load_y = ThreadConfig::nr_threads / load_x;
static int const reg_row = (smem_h + load_y - 1) / load_y;
static int const reg_col = (smem_w + load_x - 1) / load_x;
static bool const check_sh_bounds = smem_w % load_x != 0;
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef DataTileCount<UnrollConfig, ThreadConfig> TileCount;
typedef float copy_t;
float* smem;
const copy_t* g_ptr;
int stride;
int remain;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * ThreadConfig::nr_thread_x + tidx;
const int gl_load_y = tid / TileCount::load_x;
const int gl_load_x = tid - gl_load_y * TileCount::load_x;
copy_t reg[TileCount::reg_row][TileCount::reg_col];
__device__ DataGlobal2ShareMemVisitor(copy_t* smem, int stride, int remain)
: smem{smem}, stride{stride}, remain{remain} {}
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
*(sh_ptr(h_idx, w_idx)) = val;
} else {
*(sh_ptr(h_idx, w_idx)) = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
reg[i][j] = val;
} else {
reg[i][j] = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * TileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * stride;
}
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<UnrollConfig, ThreadConfig> TileCount;
float* smem;
const copy_t* g_ptr;
int stride;
int remain;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * ThreadConfig::nr_thread_x + tidx;
const int gl_load_y = tid / TileCount::load_x;
const int gl_load_x = tid - gl_load_y * TileCount::load_x;
copy_t reg[TileCount::reg_row][TileCount::reg_col];
__device__ FilterGlobal2ShareMemVisitor(copy_t* smem, int stride, int remain)
: smem{smem}, stride{stride}, remain{remain} {}
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
*(sh_ptr(h_idx, w_idx)) = val;
} else {
*(sh_ptr(h_idx, w_idx)) = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
reg[i][j] = val;
} else {
reg[i][j] = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * TileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * stride;
}
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
__device__ __forceinline__ void consume_block(
DataGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>&
data_gl2sh_visitor,
FilterGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>&
filter_gl2sh_visitor,
float r_src[UnrollConfig::unroll_n], float r_filter[UnrollConfig::unroll_co],
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_n]) {
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
#pragma unroll
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_n; ++i) {
r_src[i] = *(data_gl2sh_visitor.sh_ptr(
ci_inner, tidx + i * ThreadConfig::nr_thread_x));
}
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_co; ++j) {
r_filter[j] = *(filter_gl2sh_visitor.sh_ptr(
ci_inner, tidy + j * ThreadConfig::nr_thread_y));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
r_acc[i][j] += r_src[j] * r_filter[i];
}
}
}
}
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param, int fh, int fw, int sh, int sw) {
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount;
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int b_ho = bidx / wo;
const int b_wo = bidx - wo * b_ho;
const int sgh_idx = b_ho / param.grp_ho;
const int sgw_idx = b_wo / param.grp_wo;
const int b_batch = bidy * DataTileCount::tile_batch;
const int b_co = bidz * FilterTileCount::tile_co;
const int t_batch = tidx + b_batch;
const int t_co = tidy + b_co;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
const float* __restrict__ g_ptr_src = src + b_batch;
const float* __restrict__ g_ptr_filter = filter + b_co + // output channel
(sgh_idx * param.sgw + sgw_idx) *
param.co * param.ci * fh *
fw; // spatial group
float* __restrict__ g_ptr_dst = dst +
t_co * ho * wo * param.n // output channel stride
+ (b_ho * wo + b_wo) * param.n // spatial stride
+ t_batch;
// TODO check register
DataGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{sh_src, param.hi * param.wi * param.n, param.n - b_batch};
FilterGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, param.co * fh * fw, param.co - b_co};
float r_src[UnrollConfig::unroll_n];
float r_filter[UnrollConfig::unroll_co];
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_n];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
r_acc[i][j] = 0;
}
}
int h_base = b_ho * sh - param.ph;
int w_base = b_wo * sw - param.pw;
int h_start = h_base >= 0 ? h_base : 0;
int w_start = w_base >= 0 ? w_base : 0;
int h_end = h_base + fh - 1;
int w_end = w_base + fw - 1;
h_end = h_end < param.hi ? h_end : param.hi - 1;
w_end = w_end < param.wi ? w_end : param.wi - 1;
const int ci_blks =
(param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
int kh = h_start - h_base;
int kw = w_start - w_base;
src_gl2sh_visitor.g_ptr = g_ptr_src + (h_start * param.wi + w_start) * param.n;
filter_gl2sh_visitor.g_ptr = g_ptr_filter + (kh * fw + kw) * param.co;
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
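// Software-pipelined traversal of the receptive field: while the tiles already in
// shared memory are consumed, the next (h, w, ci) chunk is prefetched into registers
// by copy() and committed to shared memory between the two __syncthreads(); the very
// last chunk has nothing left to prefetch.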
for (int h = h_start; h <= h_end; ++h) {
for (int w = w_start; w <= w_end; ++w) {
for (int ci_outer = 0; ci_outer < ci_blks; ci_outer++) {
if (ci_outer == ci_blks - 1) {
if (!(h == h_end && w == w_end)) {
int w_next = w == w_end ? w_start : w + 1;
int h_next = w == w_end ? h + 1 : h;
int kh = h_next - h_base;
int kw = w_next - w_base;
src_gl2sh_visitor.g_ptr =
g_ptr_src + (h_next * param.wi + w_next) * param.n;
filter_gl2sh_visitor.g_ptr =
g_ptr_filter + (kh * fw + kw) * param.co;
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
}
} else {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
}
consume_block<check_bounds, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter,
r_acc);
if (!(ci_outer == ci_blks - 1 && h == h_end && w == w_end)) {
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
}
}
}
const int co_stride = ho * wo * param.n;
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
if (check_bounds && (t_co + i * ThreadConfig::nr_thread_y >= param.co ||
t_batch + j * ThreadConfig::nr_thread_x >= param.n)) {
} else {
g_ptr_dst
[i * ThreadConfig::nr_thread_y * co_stride +
j * ThreadConfig::nr_thread_x] = r_acc[i][j];
}
}
}
}
void (*get_kern(const Param& param, LaunchConfig& launch_config))(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param, int, int, int, int) {
void (*kern)(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param, int, int, int, int);
kern = nullptr;
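// Two selection passes follow: the first set of CHK macros picks a bounds-checked
// kernel (check_bounds = true) from lower bounds on n/co plus ci divisibility; the
// second set overrides it with an unchecked specialization whenever n, co and ci are
// exact multiples of the tile sizes. Later matches overwrite earlier ones.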
#define CHK3(n_, co_, ci_, tx_, ty_) \
if (param.n >= n_) { \
if (param.co >= co_) { \
if (param.ci % ci_ == 0) { \
static constexpr int unroll_ci = (ci_); \
static constexpr int unroll_co = (co_ + ty_ - 1) / ty_; \
static constexpr int unroll_n = (n_ + tx_ - 1) / tx_; \
static constexpr int thread_x = tx_; \
static constexpr int thread_y = ty_; \
typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \
typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount; \
kern = local_share_device_template_f32< \
true, UnrollConfig, ThreadConfig>; \
launch_config.nr_threads_x = thread_x; \
launch_config.nr_threads_y = thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
param.grp_ho * param.grp_wo * param.sgh * param.sgw; \
launch_config.nr_blocks_y = DIVUP(param.n, DataTileCount::tile_batch); \
launch_config.nr_blocks_z = DIVUP(param.co, FilterTileCount::tile_co); \
launch_config.smem_size_in_bytes = \
sizeof(float) * \
(DataTileCount::smem_tot + FilterTileCount::smem_tot); \
} \
} \
}
#define CHK2(n_, co_) \
CHK3(n_, co_, 4, 8, 16) \
CHK3(n_, co_, 8, 8, 16)
#define CHK2_(n_, co_) \
CHK3(n_, co_, 4, 8, 8) \
CHK3(n_, co_, 8, 8, 8)
#define CHK(n_) \
CHK2_(n_, 1) \
CHK2_(n_, 8) CHK2_(n_, 16) CHK2_(n_, 32) CHK2_(n_, 64) CHK2(n_, 128)
CHK(1)
CHK(8);
CHK(16);
CHK(32);
CHK(64);
#undef CHK
#undef CHK2
#undef CHK2_
#undef CHK3
#define CHK3(n_, co_, ci_, tx_, ty_) \
if (param.n % n_ == 0) { \
if (param.co % co_ == 0) { \
if (param.ci % ci_ == 0) { \
static constexpr int unroll_ci = (ci_); \
static constexpr int unroll_co = (co_) / (ty_); \
static constexpr int unroll_n = (n_) / (tx_); \
static constexpr int thread_x = tx_; \
static constexpr int thread_y = ty_; \
typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \
typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount; \
kern = local_share_device_template_f32< \
false, UnrollConfig, ThreadConfig>; \
launch_config.nr_threads_x = thread_x; \
launch_config.nr_threads_y = thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
param.grp_ho * param.grp_wo * param.sgh * param.sgw; \
launch_config.nr_blocks_y = DIVUP(param.n, DataTileCount::tile_batch); \
launch_config.nr_blocks_z = DIVUP(param.co, FilterTileCount::tile_co); \
launch_config.smem_size_in_bytes = \
sizeof(float) * \
(DataTileCount::smem_tot + FilterTileCount::smem_tot); \
} \
} \
}
#define CHK2(n_, co_) CHK3(n_, co_, 4, 8, 8) CHK3(n_, co_, 8, 8, 8)
#define CHK(n_) \
CHK2(n_, 8) \
CHK2(n_, 16) \
CHK2(n_, 32) CHK2(n_, 64) CHK3(n_, 128, 4, 8, 16) CHK3(n_, 128, 8, 8, 16)
CHK(8);
CHK(16);
CHK(32);
CHK(64);
#undef CHK
#undef CHK2
#undef CHK3
megdnn_assert(
kern != nullptr,
"no usable kernel implementation for local share "
"convolution (batch,co,ci)=(%d,%d,%d)",
param.n, param.co, param.ci);
return kern;
}
} // namespace
void megdnn::cuda::local_share::
_do_local_share_convolution_large_batch_size_small_image(
const float* d_src, const float* d_filter, float* d_dst,
float* workspace, int fh, int fw, int sh, int sw, const Param& param,
hipblasHandle_t cublas_handle, hipStream_t stream, float* one,
float* zero) {
float* ws_src = workspace;
int nr_src_total = param.n * param.ci * param.hi * param.wi;
float* ws_dst = ws_src + nr_src_total;
// tensor reformat from (n, c, h, w) -> (c, h, w, n)
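// (hipBLAS follows the BLAS column-major convention, so Sgeam with both operands
// transposed and beta = 0 is an out-of-place transpose: the NCHW tensor viewed as a
// (c*h*w) x n column-major matrix becomes n x (c*h*w), i.e. CHWN with the batch
// dimension contiguous, which is the layout the kernel expects.)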
{
int m = param.n, n = param.ci * param.hi * param.wi;
int lda, ldb;
lda = ldb = param.ci * param.hi * param.wi;
int ldc = param.n;
cublas_check(hipblasSgeam(
cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, one, d_src, lda, zero,
d_src, ldb, ws_src, ldc));
}
{
void (*kern)(
const float* __restrict__, const float* __restrict__,
float* __restrict__, Param, int, int, int, int);
LaunchConfig launch_config;
kern = get_kern(param, launch_config);
uint32_t nr_threads_x = launch_config.nr_threads_x,
nr_threads_y = launch_config.nr_threads_y,
nr_blocks_x = launch_config.nr_blocks_x,
nr_blocks_y = launch_config.nr_blocks_y,
nr_blocks_z = launch_config.nr_blocks_z,
smem_size_in_bytes = launch_config.smem_size_in_bytes;
_check_launch_config(launch_config);
dim3 block_size{nr_threads_x, nr_threads_y, 1};
dim3 grid_size{nr_blocks_x, nr_blocks_y, nr_blocks_z};
hipLaunchKernelGGL(( kern), dim3(grid_size), dim3(block_size), smem_size_in_bytes, stream,
ws_src, d_filter, ws_dst, param, fh, fw, sh, sw);
after_kernel_launch();
}
// tensor reformat from (c, h, w, n) -> (n, c, h, w)
{
int ho = param.grp_ho * param.sgh, wo = param.grp_wo * param.sgw;
int m = param.co * ho * wo, n = param.n;
int lda, ldb;
lda = ldb = param.n;
int ldc = param.co * ho * wo;
cublas_check(hipblasSgeam(
cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, one, ws_dst, lda, zero,
ws_dst, ldb, d_dst, ldc));
}
}
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cuda.doxygen
| 02825a3c9d01d6235ceb251b6aa8c1e9113c59e4.cu | #include "./local_share_forward.cuh"
#include "src/cuda/kernel_common/diagnostic_prologue.cuh"
using namespace megdnn;
using namespace cuda;
using namespace local_share;
namespace {
template <int unroll_ci_, int unroll_co_, int unroll_n_>
struct UnrollConfig {
static int const unroll_ci = unroll_ci_;
static int const unroll_co = unroll_co_;
static int const unroll_n = unroll_n_;
};
template <int thread_x, int thread_y>
struct ThreadConfig {
static int const nr_thread_x = thread_x;
static int const nr_thread_y = thread_y;
static int const nr_threads = nr_thread_x * nr_thread_y;
};
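// A thread block produces tile_co x tile_batch outputs of one output pixel, iterating
// over input channels in chunks of unroll_ci; data and filter chunks are staged in
// shared memory, the filter tile with a stride padded by one float (likely to avoid
// bank conflicts).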
template <typename UnrollConfig_, typename ThreadConfig_>
struct DataTileCount {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
static int const tile_batch = UnrollConfig::unroll_n * ThreadConfig::nr_thread_x;
static int const load_x = tile_batch > 32 ? 32 : tile_batch;
static int const load_y = ThreadConfig::nr_threads / load_x;
static int const smem_h = UnrollConfig::unroll_ci;
static int const smem_w = tile_batch;
static int const smem_stride = smem_w;
static int const smem_tot = smem_h * smem_stride;
static int const reg_row = (smem_h + load_y - 1) / load_y;
static int const reg_col = (smem_w + load_x - 1) / load_x;
static bool const check_sh_bounds = smem_w % load_x != 0;
};
template <typename UnrollConfig_, typename ThreadConfig_>
struct FilterTileCount {
typedef UnrollConfig_ UnrollConfig;
typedef ThreadConfig_ ThreadConfig;
static int const tile_co = ThreadConfig::nr_thread_y * UnrollConfig::unroll_co;
static int const smem_h = UnrollConfig::unroll_ci;
static int const smem_w = tile_co;
static int const smem_stride = smem_w + 1;
static int const smem_tot = smem_h * smem_stride;
static int const load_x = tile_co > 32 ? 32 : tile_co;
static int const load_y = ThreadConfig::nr_threads / load_x;
static int const reg_row = (smem_h + load_y - 1) / load_y;
static int const reg_col = (smem_w + load_x - 1) / load_x;
static bool const check_sh_bounds = smem_w % load_x != 0;
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
struct DataGlobal2ShareMemVisitor {
typedef DataTileCount<UnrollConfig, ThreadConfig> TileCount;
typedef float copy_t;
float* smem;
const copy_t* g_ptr;
int stride;
int remain;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * ThreadConfig::nr_thread_x + tidx;
const int gl_load_y = tid / TileCount::load_x;
const int gl_load_x = tid - gl_load_y * TileCount::load_x;
copy_t reg[TileCount::reg_row][TileCount::reg_col];
__device__ DataGlobal2ShareMemVisitor(copy_t* smem, int stride, int remain)
: smem{smem}, stride{stride}, remain{remain} {}
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
*(sh_ptr(h_idx, w_idx)) = val;
} else {
*(sh_ptr(h_idx, w_idx)) = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
reg[i][j] = val;
} else {
reg[i][j] = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * TileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * stride;
}
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
struct FilterGlobal2ShareMemVisitor {
typedef float copy_t;
typedef FilterTileCount<UnrollConfig, ThreadConfig> TileCount;
float* smem;
const copy_t* g_ptr;
int stride;
int remain;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int tid = tidy * ThreadConfig::nr_thread_x + tidx;
const int gl_load_y = tid / TileCount::load_x;
const int gl_load_x = tid - gl_load_y * TileCount::load_x;
copy_t reg[TileCount::reg_row][TileCount::reg_col];
__device__ FilterGlobal2ShareMemVisitor(copy_t* smem, int stride, int remain)
: smem{smem}, stride{stride}, remain{remain} {}
__device__ __forceinline__ void first_copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
*(sh_ptr(h_idx, w_idx)) = val;
} else {
*(sh_ptr(h_idx, w_idx)) = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void copy() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unroll
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
if (check_bounds) {
copy_t val = 0.f;
if (w_idx < remain) {
val = g_ptr[h_idx * stride + w_idx];
}
reg[i][j] = val;
} else {
reg[i][j] = g_ptr[h_idx * stride + w_idx];
}
}
}
}
__device__ __forceinline__ void commit() {
#pragma unroll
for (int i = 0; i < TileCount::reg_row; ++i) {
int h_idx = gl_load_y + i * TileCount::load_y;
#pragma unrol
for (int j = 0; j < TileCount::reg_col; ++j) {
int w_idx = gl_load_x + j * TileCount::load_x;
if (h_idx >= TileCount::smem_h)
continue;
if (TileCount::check_sh_bounds && w_idx >= TileCount::smem_w)
continue;
*(sh_ptr(h_idx, w_idx)) = reg[i][j];
}
}
}
__device__ __forceinline__ float* sh_ptr(int y, int x) {
return &smem[y * TileCount::smem_stride + x];
}
__device__ __forceinline__ void move_forward() {
g_ptr += UnrollConfig::unroll_ci * stride;
}
};
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
__device__ __forceinline__ void consume_block(
DataGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>&
data_gl2sh_visitor,
FilterGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>&
filter_gl2sh_visitor,
float r_src[UnrollConfig::unroll_n], float r_filter[UnrollConfig::unroll_co],
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_n]) {
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
#pragma unroll
for (int ci_inner = 0; ci_inner < UnrollConfig::unroll_ci; ++ci_inner) {
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_n; ++i) {
r_src[i] = *(data_gl2sh_visitor.sh_ptr(
ci_inner, tidx + i * ThreadConfig::nr_thread_x));
}
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_co; ++j) {
r_filter[j] = *(filter_gl2sh_visitor.sh_ptr(
ci_inner, tidy + j * ThreadConfig::nr_thread_y));
}
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
r_acc[i][j] += r_src[j] * r_filter[i];
}
}
}
}
template <bool check_bounds, typename UnrollConfig, typename ThreadConfig>
__global__ void local_share_device_template_f32(
const float* __restrict__ src, const float* __restrict__ filter,
float* __restrict__ dst, Param param, int fh, int fw, int sh, int sw) {
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount;
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount;
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int bidx = blockIdx.x;
const int bidy = blockIdx.y;
const int bidz = blockIdx.z;
const int ho = param.sgh * param.grp_ho;
const int wo = param.sgw * param.grp_wo;
const int b_ho = bidx / wo;
const int b_wo = bidx - wo * b_ho;
const int sgh_idx = b_ho / param.grp_ho;
const int sgw_idx = b_wo / param.grp_wo;
const int b_batch = bidy * DataTileCount::tile_batch;
const int b_co = bidz * FilterTileCount::tile_co;
const int t_batch = tidx + b_batch;
const int t_co = tidy + b_co;
extern __shared__ float smem[];
float* sh_src = smem;
float* sh_filter = smem + DataTileCount::smem_tot;
const float* __restrict__ g_ptr_src = src + b_batch;
const float* __restrict__ g_ptr_filter = filter + b_co + // output channel
(sgh_idx * param.sgw + sgw_idx) *
param.co * param.ci * fh *
fw; // spatial group
float* __restrict__ g_ptr_dst = dst +
t_co * ho * wo * param.n // output channel stride
+ (b_ho * wo + b_wo) * param.n // spatial stride
+ t_batch;
// TODO check register
DataGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>
src_gl2sh_visitor{sh_src, param.hi * param.wi * param.n, param.n - b_batch};
FilterGlobal2ShareMemVisitor<check_bounds, UnrollConfig, ThreadConfig>
filter_gl2sh_visitor{sh_filter, param.co * fh * fw, param.co - b_co};
float r_src[UnrollConfig::unroll_n];
float r_filter[UnrollConfig::unroll_co];
float r_acc[UnrollConfig::unroll_co][UnrollConfig::unroll_n];
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
r_acc[i][j] = 0;
}
}
int h_base = b_ho * sh - param.ph;
int w_base = b_wo * sw - param.pw;
int h_start = h_base >= 0 ? h_base : 0;
int w_start = w_base >= 0 ? w_base : 0;
int h_end = h_base + fh - 1;
int w_end = w_base + fw - 1;
h_end = h_end < param.hi ? h_end : param.hi - 1;
w_end = w_end < param.wi ? w_end : param.wi - 1;
const int ci_blks =
(param.ci + UnrollConfig::unroll_ci - 1) / UnrollConfig::unroll_ci;
int kh = h_start - h_base;
int kw = w_start - w_base;
src_gl2sh_visitor.g_ptr = g_ptr_src + (h_start * param.wi + w_start) * param.n;
filter_gl2sh_visitor.g_ptr = g_ptr_filter + (kh * fw + kw) * param.co;
src_gl2sh_visitor.first_copy();
filter_gl2sh_visitor.first_copy();
__syncthreads();
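// Double-buffered main loop: the next chunk is loaded into registers during
// consume_block() and only written to shared memory after a barrier, so compute and
// global loads overlap; the final chunk skips the prefetch.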
for (int h = h_start; h <= h_end; ++h) {
for (int w = w_start; w <= w_end; ++w) {
for (int ci_outer = 0; ci_outer < ci_blks; ci_outer++) {
if (ci_outer == ci_blks - 1) {
if (!(h == h_end && w == w_end)) {
int w_next = w == w_end ? w_start : w + 1;
int h_next = w == w_end ? h + 1 : h;
int kh = h_next - h_base;
int kw = w_next - w_base;
src_gl2sh_visitor.g_ptr =
g_ptr_src + (h_next * param.wi + w_next) * param.n;
filter_gl2sh_visitor.g_ptr =
g_ptr_filter + (kh * fw + kw) * param.co;
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
}
} else {
src_gl2sh_visitor.move_forward();
filter_gl2sh_visitor.move_forward();
src_gl2sh_visitor.copy();
filter_gl2sh_visitor.copy();
}
consume_block<check_bounds, UnrollConfig, ThreadConfig>(
src_gl2sh_visitor, filter_gl2sh_visitor, r_src, r_filter,
r_acc);
if (!(ci_outer == ci_blks - 1 && h == h_end && w == w_end)) {
__syncthreads();
src_gl2sh_visitor.commit();
filter_gl2sh_visitor.commit();
__syncthreads();
}
}
}
}
const int co_stride = ho * wo * param.n;
#pragma unroll
for (int i = 0; i < UnrollConfig::unroll_co; ++i) {
#pragma unroll
for (int j = 0; j < UnrollConfig::unroll_n; ++j) {
if (check_bounds && (t_co + i * ThreadConfig::nr_thread_y >= param.co ||
t_batch + j * ThreadConfig::nr_thread_x >= param.n)) {
} else {
g_ptr_dst
[i * ThreadConfig::nr_thread_y * co_stride +
j * ThreadConfig::nr_thread_x] = r_acc[i][j];
}
}
}
}
void (*get_kern(const Param& param, LaunchConfig& launch_config))(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param, int, int, int, int) {
void (*kern)(
const float* __restrict__, const float* __restrict__, float* __restrict__,
Param, int, int, int, int);
kern = nullptr;
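// The CHK macros below run in two passes: bounds-checked candidates first, then
// unchecked specializations when n, co and ci divide the tile sizes exactly; the
// last matching configuration wins.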
#define CHK3(n_, co_, ci_, tx_, ty_) \
if (param.n >= n_) { \
if (param.co >= co_) { \
if (param.ci % ci_ == 0) { \
static constexpr int unroll_ci = (ci_); \
static constexpr int unroll_co = (co_ + ty_ - 1) / ty_; \
static constexpr int unroll_n = (n_ + tx_ - 1) / tx_; \
static constexpr int thread_x = tx_; \
static constexpr int thread_y = ty_; \
typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \
typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount; \
kern = local_share_device_template_f32< \
true, UnrollConfig, ThreadConfig>; \
launch_config.nr_threads_x = thread_x; \
launch_config.nr_threads_y = thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
param.grp_ho * param.grp_wo * param.sgh * param.sgw; \
launch_config.nr_blocks_y = DIVUP(param.n, DataTileCount::tile_batch); \
launch_config.nr_blocks_z = DIVUP(param.co, FilterTileCount::tile_co); \
launch_config.smem_size_in_bytes = \
sizeof(float) * \
(DataTileCount::smem_tot + FilterTileCount::smem_tot); \
} \
} \
}
#define CHK2(n_, co_) \
CHK3(n_, co_, 4, 8, 16) \
CHK3(n_, co_, 8, 8, 16)
#define CHK2_(n_, co_) \
CHK3(n_, co_, 4, 8, 8) \
CHK3(n_, co_, 8, 8, 8)
#define CHK(n_) \
CHK2_(n_, 1) \
CHK2_(n_, 8) CHK2_(n_, 16) CHK2_(n_, 32) CHK2_(n_, 64) CHK2(n_, 128)
CHK(1)
CHK(8);
CHK(16);
CHK(32);
CHK(64);
#undef CHK
#undef CHK2
#undef CHK2_
#undef CHK3
#define CHK3(n_, co_, ci_, tx_, ty_) \
if (param.n % n_ == 0) { \
if (param.co % co_ == 0) { \
if (param.ci % ci_ == 0) { \
static constexpr int unroll_ci = (ci_); \
static constexpr int unroll_co = (co_) / (ty_); \
static constexpr int unroll_n = (n_) / (tx_); \
static constexpr int thread_x = tx_; \
static constexpr int thread_y = ty_; \
typedef UnrollConfig<unroll_ci, unroll_co, unroll_n> UnrollConfig; \
typedef ThreadConfig<thread_x, thread_y> ThreadConfig; \
typedef DataTileCount<UnrollConfig, ThreadConfig> DataTileCount; \
typedef FilterTileCount<UnrollConfig, ThreadConfig> FilterTileCount; \
kern = local_share_device_template_f32< \
false, UnrollConfig, ThreadConfig>; \
launch_config.nr_threads_x = thread_x; \
launch_config.nr_threads_y = thread_y; \
launch_config.nr_threads_z = 1; \
launch_config.nr_blocks_x = \
param.grp_ho * param.grp_wo * param.sgh * param.sgw; \
launch_config.nr_blocks_y = DIVUP(param.n, DataTileCount::tile_batch); \
launch_config.nr_blocks_z = DIVUP(param.co, FilterTileCount::tile_co); \
launch_config.smem_size_in_bytes = \
sizeof(float) * \
(DataTileCount::smem_tot + FilterTileCount::smem_tot); \
} \
} \
}
#define CHK2(n_, co_) CHK3(n_, co_, 4, 8, 8) CHK3(n_, co_, 8, 8, 8)
#define CHK(n_) \
CHK2(n_, 8) \
CHK2(n_, 16) \
CHK2(n_, 32) CHK2(n_, 64) CHK3(n_, 128, 4, 8, 16) CHK3(n_, 128, 8, 8, 16)
CHK(8);
CHK(16);
CHK(32);
CHK(64);
#undef CHK
#undef CHK2
#undef CHK3
megdnn_assert(
kern != nullptr,
"no usable kernel implementation for local share "
"convolution (batch,co,ci)=(%d,%d,%d)",
param.n, param.co, param.ci);
return kern;
}
} // namespace
void megdnn::cuda::local_share::
_do_local_share_convolution_large_batch_size_small_image(
const float* d_src, const float* d_filter, float* d_dst,
float* workspace, int fh, int fw, int sh, int sw, const Param& param,
cublasHandle_t cublas_handle, cudaStream_t stream, float* one,
float* zero) {
float* ws_src = workspace;
int nr_src_total = param.n * param.ci * param.hi * param.wi;
float* ws_dst = ws_src + nr_src_total;
// tensor reformat from (n, c, h, w) -> (c, h, w, n)
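// (cuBLAS Sgeam with both ops transposed and beta = 0 acts as an out-of-place
// transpose of the (c*h*w) x n column-major view of the NCHW tensor, producing the
// batch-innermost CHWN layout consumed by the kernel.)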
{
int m = param.n, n = param.ci * param.hi * param.wi;
int lda, ldb;
lda = ldb = param.ci * param.hi * param.wi;
int ldc = param.n;
cublas_check(cublasSgeam(
cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, d_src, lda, zero,
d_src, ldb, ws_src, ldc));
}
{
void (*kern)(
const float* __restrict__, const float* __restrict__,
float* __restrict__, Param, int, int, int, int);
LaunchConfig launch_config;
kern = get_kern(param, launch_config);
uint32_t nr_threads_x = launch_config.nr_threads_x,
nr_threads_y = launch_config.nr_threads_y,
nr_blocks_x = launch_config.nr_blocks_x,
nr_blocks_y = launch_config.nr_blocks_y,
nr_blocks_z = launch_config.nr_blocks_z,
smem_size_in_bytes = launch_config.smem_size_in_bytes;
_check_launch_config(launch_config);
dim3 block_size{nr_threads_x, nr_threads_y, 1};
dim3 grid_size{nr_blocks_x, nr_blocks_y, nr_blocks_z};
kern<<<grid_size, block_size, smem_size_in_bytes, stream>>>(
ws_src, d_filter, ws_dst, param, fh, fw, sh, sw);
after_kernel_launch();
}
// tensor reformat from (c, h, w, n) -> (n, c, h, w)
{
int ho = param.grp_ho * param.sgh, wo = param.grp_wo * param.sgw;
int m = param.co * ho * wo, n = param.n;
int lda, ldb;
lda = ldb = param.n;
int ldc = param.co * ho * wo;
cublas_check(cublasSgeam(
cublas_handle, CUBLAS_OP_T, CUBLAS_OP_T, m, n, one, ws_dst, lda, zero,
ws_dst, ldb, d_dst, ldc));
}
}
#include "src/cuda/kernel_common/diagnostic_epilogue.cuh"
// vim: syntax=cuda.doxygen
|
3c29f789191201f945c2e1d09ae3771c9892154b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_ideal_gas_kernel;
int xdim0_ideal_gas_kernel_h = -1;
__constant__ int ydim0_ideal_gas_kernel;
int ydim0_ideal_gas_kernel_h = -1;
__constant__ int xdim1_ideal_gas_kernel;
int xdim1_ideal_gas_kernel_h = -1;
__constant__ int ydim1_ideal_gas_kernel;
int ydim1_ideal_gas_kernel_h = -1;
__constant__ int xdim2_ideal_gas_kernel;
int xdim2_ideal_gas_kernel_h = -1;
__constant__ int ydim2_ideal_gas_kernel;
int ydim2_ideal_gas_kernel_h = -1;
__constant__ int xdim3_ideal_gas_kernel;
int xdim3_ideal_gas_kernel_h = -1;
__constant__ int ydim3_ideal_gas_kernel;
int ydim3_ideal_gas_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_ideal_gas_kernel * (y) + \
xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_ideal_gas_kernel * (y) + \
xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_ideal_gas_kernel * (y) + \
xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_ideal_gas_kernel * (y) + \
xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel * (z))
// user function
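// For the gamma = 1.4 ideal gas used here, p = (gamma - 1) * rho * e; pressurebyenergy
// and pressurebyvolume below are dp/de and dp/dv (v = 1/rho), and the expression for
// sound_speed_squared reduces to c^2 = gamma * p / rho.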
__device__ void ideal_gas_kernel_gpu(const double *density, const double *energy,
double *pressure, double *soundspeed) {
double sound_speed_squared, v, pressurebyenergy, pressurebyvolume;
v = 1.0 / density[OPS_ACC0(0, 0, 0)];
pressure[OPS_ACC2(0, 0, 0)] =
(1.4 - 1.0) * density[OPS_ACC0(0, 0, 0)] * energy[OPS_ACC1(0, 0, 0)];
pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0, 0, 0)];
pressurebyvolume =
-1.0 * density[OPS_ACC0(0, 0, 0)] * pressure[OPS_ACC2(0, 0, 0)];
sound_speed_squared =
v * v *
(pressure[OPS_ACC2(0, 0, 0)] * pressurebyenergy - pressurebyvolume);
soundspeed[OPS_ACC3(0, 0, 0)] = sqrt(sound_speed_squared);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_ideal_gas_kernel(const double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
double *__restrict arg3, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_ideal_gas_kernel +
idx_z * 1 * 1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_ideal_gas_kernel +
idx_z * 1 * 1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_ideal_gas_kernel +
idx_z * 1 * 1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_ideal_gas_kernel +
idx_z * 1 * 1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 11))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(11, "ideal_gas_kernel");
OPS_kernels[11].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h ||
xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h ||
xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h ||
xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) {
hipMemcpyToSymbol(xdim0_ideal_gas_kernel, &xdim0, sizeof(int));
xdim0_ideal_gas_kernel_h = xdim0;
hipMemcpyToSymbol(ydim0_ideal_gas_kernel, &ydim0, sizeof(int));
ydim0_ideal_gas_kernel_h = ydim0;
hipMemcpyToSymbol(xdim1_ideal_gas_kernel, &xdim1, sizeof(int));
xdim1_ideal_gas_kernel_h = xdim1;
hipMemcpyToSymbol(ydim1_ideal_gas_kernel, &ydim1, sizeof(int));
ydim1_ideal_gas_kernel_h = ydim1;
hipMemcpyToSymbol(xdim2_ideal_gas_kernel, &xdim2, sizeof(int));
xdim2_ideal_gas_kernel_h = xdim2;
hipMemcpyToSymbol(ydim2_ideal_gas_kernel, &ydim2, sizeof(int));
ydim2_ideal_gas_kernel_h = ydim2;
hipMemcpyToSymbol(xdim3_ideal_gas_kernel, &xdim3, sizeof(int));
xdim3_ideal_gas_kernel_h = xdim3;
hipMemcpyToSymbol(ydim3_ideal_gas_kernel, &ydim3, sizeof(int));
ydim3_ideal_gas_kernel_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[11].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_ideal_gas_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[11].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[11].mpi_time += t2 - t1;
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 11;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 11;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_ideal_gas_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(11, "ideal_gas_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 3c29f789191201f945c2e1d09ae3771c9892154b.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_ideal_gas_kernel;
int xdim0_ideal_gas_kernel_h = -1;
__constant__ int ydim0_ideal_gas_kernel;
int ydim0_ideal_gas_kernel_h = -1;
__constant__ int xdim1_ideal_gas_kernel;
int xdim1_ideal_gas_kernel_h = -1;
__constant__ int ydim1_ideal_gas_kernel;
int ydim1_ideal_gas_kernel_h = -1;
__constant__ int xdim2_ideal_gas_kernel;
int xdim2_ideal_gas_kernel_h = -1;
__constant__ int ydim2_ideal_gas_kernel;
int ydim2_ideal_gas_kernel_h = -1;
__constant__ int xdim3_ideal_gas_kernel;
int xdim3_ideal_gas_kernel_h = -1;
__constant__ int ydim3_ideal_gas_kernel;
int ydim3_ideal_gas_kernel_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_ideal_gas_kernel * (y) + \
xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_ideal_gas_kernel * (y) + \
xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_ideal_gas_kernel * (y) + \
xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_ideal_gas_kernel * (y) + \
xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel * (z))
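/* Illustrative aside, not part of the OPS-generated output: OPS_ACC0 flattens a
   3-D stencil offset (x,y,z) into a linear index, idx = x + xdim0*y + xdim0*ydim0*z,
   where xdim0/ydim0 are the padded dataset extents copied to constant memory below.
   For example, with xdim0 = 8 and ydim0 = 4 (illustrative values only), the offset
   (2,1,3) maps to 2 + 8*1 + 8*4*3 = 106. The other three macros differ only in
   which dataset's extents they use. */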
// user function
__device__
void
ideal_gas_kernel_gpu(const double *density, const double *energy,
double *pressure, double *soundspeed) {
double sound_speed_squared, v, pressurebyenergy, pressurebyvolume;
v = 1.0 / density[OPS_ACC0(0, 0, 0)];
pressure[OPS_ACC2(0, 0, 0)] =
(1.4 - 1.0) * density[OPS_ACC0(0, 0, 0)] * energy[OPS_ACC1(0, 0, 0)];
pressurebyenergy = (1.4 - 1.0) * density[OPS_ACC0(0, 0, 0)];
pressurebyvolume =
-1.0 * density[OPS_ACC0(0, 0, 0)] * pressure[OPS_ACC2(0, 0, 0)];
sound_speed_squared =
v * v *
(pressure[OPS_ACC2(0, 0, 0)] * pressurebyenergy - pressurebyvolume);
soundspeed[OPS_ACC3(0, 0, 0)] = sqrt(sound_speed_squared);
}
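/* A minimal host-side sketch of the same ideal-gas update for a single cell,
   added for illustration and not part of the OPS-generated output; it can serve
   as a reference when checking the device routine above. The helper name
   ideal_gas_point_host is hypothetical, and sqrt() is assumed to be in scope via
   the CUDA math headers or <cmath>. */
static inline void ideal_gas_point_host(double density, double energy,
                                        double *pressure, double *soundspeed) {
  const double gamma = 1.4; /* ratio of specific heats hard-coded in the kernel */
  double v = 1.0 / density; /* specific volume */
  *pressure = (gamma - 1.0) * density * energy; /* p = (gamma - 1) * rho * e */
  double pressurebyenergy = (gamma - 1.0) * density;
  double pressurebyvolume = -1.0 * density * (*pressure);
  double sound_speed_squared =
      v * v * ((*pressure) * pressurebyenergy - pressurebyvolume);
  *soundspeed = sqrt(sound_speed_squared);
}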
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_ideal_gas_kernel(const double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
double *__restrict arg3, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_ideal_gas_kernel +
idx_z * 1 * 1 * xdim0_ideal_gas_kernel * ydim0_ideal_gas_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_ideal_gas_kernel +
idx_z * 1 * 1 * xdim1_ideal_gas_kernel * ydim1_ideal_gas_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_ideal_gas_kernel +
idx_z * 1 * 1 * xdim2_ideal_gas_kernel * ydim2_ideal_gas_kernel;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_ideal_gas_kernel +
idx_z * 1 * 1 * xdim3_ideal_gas_kernel * ydim3_ideal_gas_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ideal_gas_kernel_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_ideal_gas_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 4, range, 11))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(11, "ideal_gas_kernel");
OPS_kernels[11].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_ideal_gas_kernel_h || ydim0 != ydim0_ideal_gas_kernel_h ||
xdim1 != xdim1_ideal_gas_kernel_h || ydim1 != ydim1_ideal_gas_kernel_h ||
xdim2 != xdim2_ideal_gas_kernel_h || ydim2 != ydim2_ideal_gas_kernel_h ||
xdim3 != xdim3_ideal_gas_kernel_h || ydim3 != ydim3_ideal_gas_kernel_h) {
cudaMemcpyToSymbol(xdim0_ideal_gas_kernel, &xdim0, sizeof(int));
xdim0_ideal_gas_kernel_h = xdim0;
cudaMemcpyToSymbol(ydim0_ideal_gas_kernel, &ydim0, sizeof(int));
ydim0_ideal_gas_kernel_h = ydim0;
cudaMemcpyToSymbol(xdim1_ideal_gas_kernel, &xdim1, sizeof(int));
xdim1_ideal_gas_kernel_h = xdim1;
cudaMemcpyToSymbol(ydim1_ideal_gas_kernel, &ydim1, sizeof(int));
ydim1_ideal_gas_kernel_h = ydim1;
cudaMemcpyToSymbol(xdim2_ideal_gas_kernel, &xdim2, sizeof(int));
xdim2_ideal_gas_kernel_h = xdim2;
cudaMemcpyToSymbol(ydim2_ideal_gas_kernel, &ydim2, sizeof(int));
ydim2_ideal_gas_kernel_h = ydim2;
cudaMemcpyToSymbol(xdim3_ideal_gas_kernel, &xdim3, sizeof(int));
xdim3_ideal_gas_kernel_h = xdim3;
cudaMemcpyToSymbol(ydim3_ideal_gas_kernel, &ydim3, sizeof(int));
ydim3_ideal_gas_kernel_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[11].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_ideal_gas_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[11].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[11].mpi_time += t2 - t1;
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_ideal_gas_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 11;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 11;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg *)malloc(4 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_ideal_gas_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(11, "ideal_gas_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
0e1cf4367fbe9613d468516257fb1762a35916bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CudaVecDouble.h"
__global__ void kernel_vecDouble(int *in, int *out, const int n)
{
int i = threadIdx.x;
if (i < n) {
out[i] = in[i] * 2;
}
}
void vecDouble(int *hIn, int *hOut, const int n)
{
int *dIn;
int *dOut;
hipHostMalloc((void**)&dIn, n * sizeof(int));
hipHostMalloc((void**)&dOut, n * sizeof(int));
hipMemcpy(dIn, hIn, n * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_vecDouble), dim3(1), dim3(n), 0, 0, dIn, dOut, n);
hipDeviceSynchronize();
hipMemcpy(hOut, dOut, n * sizeof(int), hipMemcpyDeviceToHost);
	hipHostFree(dIn);  // memory from hipHostMalloc must be released with hipHostFree, not hipFree
	hipHostFree(dOut);
}
| 0e1cf4367fbe9613d468516257fb1762a35916bd.cu | #include "cuda_runtime.h"
#include "CudaVecDouble.h"
__global__ void kernel_vecDouble(int *in, int *out, const int n)
{
int i = threadIdx.x;
if (i < n) {
out[i] = in[i] * 2;
}
}
void vecDouble(int *hIn, int *hOut, const int n)
{
int *dIn;
int *dOut;
cudaMallocHost((void**)&dIn, n * sizeof(int));
cudaMallocHost((void**)&dOut, n * sizeof(int));
cudaMemcpy(dIn, hIn, n * sizeof(int), cudaMemcpyHostToDevice);
kernel_vecDouble<<<1, n>>>(dIn, dOut, n);
cudaDeviceSynchronize();
cudaMemcpy(hOut, dOut, n * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFreeHost(dIn);  // memory from cudaMallocHost must be released with cudaFreeHost, not cudaFree
	cudaFreeHost(dOut);
}
|
4c3ac69c9c45250413cb3fd1ee4867a519d38776.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Written by Xueyan Zou @TuSimple Algorithm Intern
*/
#include "./spatial-completion-inl.h"
#include <assert.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
__device__ void get_gate_idx_xy(int h1, int w1, int h2, int w2, int* out, bool horizontal, bool reverse)
{
if(horizontal && ! reverse) // left -> right
{
if(w1>w2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(horizontal && reverse) // right -> left
{
if(w1<w2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(!horizontal && !reverse) // top -> bottom
{
if(h1>h2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(!horizontal && reverse) // bottom -> top
{
if(h1<h2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
}
template <typename Dtype>
__device__ void set_gate_xy(Dtype* data, int num, int channels, int height, int width, int n, int c, int h1, int w1, int h2, int w2, Dtype v, bool horizontal, bool reverse)
{
if(h1<0 || h1 >=height) //redundant
return ; //redundant
if(w1<0 || w1 >= width) //redundant
return ; //redundant
if(h2<0 || h2 >=height) //redundant
return ; //redundant
if(w2<0 || w2 >= width) //redundant
return ; //redundant
int idx[2];
get_gate_idx_xy(h1, w1, h2, w2, idx, horizontal, reverse);
int h = idx[0];
int w = idx[1];
data[n*channels*height*width + c*height*width + h*width + w] = v;
}
template <typename Dtype> //this function is modified by xueyan
__device__ Dtype get_gate_xy(Dtype * data, int num, int channels, int height, int width, int n, int c, int h1, int w1, int h2, int w2, bool horizontal, bool reverse){
//handle index out of range
if(h1<0 || h1 >=height) //redundant
return 0; //redundant
if(w1<0 || w1 >= width) //redundant
return 0; //redundant
if(h2<0 || h2 >=height)
return 0;
if(w2<0 || w2 >= width)
return 0;
int idx[2];
get_gate_idx_xy(h1, w1, h2, w2, idx, horizontal, reverse);
int h = idx[0];
int w = idx[1];
return data[n*channels*height*width + c*height*width + h*width + w];
}
template <typename Dtype>
__device__ void set_data_xy(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w, Dtype v)
{
//modify by xueyan, assert error.
if(h<0 || h >=height)
assert(0);
if(w<0 || w >= width)
assert(0);
data[n*channels*height*width + c*height*width + h*width + w] = v;
}
template <typename Dtype>
__device__ Dtype get_data_xy(Dtype *data, int num, int channels, int height, int width, int n, int c, int h, int w){
//handle index out of range
if(h<0 || h >=height)
return 0;
if(w<0 || w >= width)
return 0;
//spatial-propagation-inl.h:82 -> default configuration of dim is (batch, channel, height, width)
return data[n*channels*height*width + c*height*width + h*width + w];
}
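/* Illustrative aside, not part of the original operator source: the get_* accessors
   above flatten an NCHW tensor as idx = ((n*channels + c)*height + h)*width + w and
   return 0 when the spatial index falls outside the image, which is how the boundary
   rows/columns of the propagation are handled. With the illustrative sizes
   channels=2, height=3, width=4, element (n=1, c=0, h=2, w=1) lands at
   ((1*2 + 0)*3 + 2)*4 + 1 = 33. */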
/*LEFT->RIGHT*/
/*h(t) = (1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t-1))*/
template <typename Dtype>
__global__ void forward_one_col_left_right(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels; //in order to calculate batch size
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//modify logic by xueyan
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); //g_1(t)
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1); //h_1(t-1)
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_1(t)*h_1(t-1)
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); //g_2(t)
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h,w-1); //h_2(t-1)
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_2(t)*h_2(t-1)
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); //g_3(t)
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1); //h_3(t-1)
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_3(t)*h_3(t-1)
Dtype h_hype = (h1_minus1 + h2_minus1 + h3_minus1) * (1 - c_data); //sum(g_i(t)*h_i(t-1)) * (1-c) = (g_1(t)*h_1(t-1)+g_2(t)*h_2(t-1)+g_3(t)*h_3(t-1))*(1-c)
Dtype x_hype = c_data * x_data; //c * x
Dtype h_data = x_hype + h_hype; //c * x_i(t) + sum(g_i(t) * h_i(t-1))*(1-c)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data); //set H data at point x
}
}
/*END h(t) = (1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t-1))*/
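/* Written out, the update computed by forward_one_col_left_right for each pixel in
   column t is
     h(t) = c * x(t) + (1 - c) * ( g1(t)*h1(t-1) + g2(t)*h2(t-1) + g3(t)*h3(t-1) )
   where h1/h2/h3 are the hidden values of the three neighbours (rows h-1, h, h+1) in
   the previous column and c is a per-pixel blending weight between the raw input x
   and the propagated neighbours. This restatement is an editorial illustration, not
   part of the original operator source; the three kernels below repeat the same
   pattern for the other propagation directions, only the neighbour offsets change. */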
/*RIGHT->LEFT*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void forward_one_col_right_left(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//modify logic by xueyan
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); //g_1(t)
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1); //h_1(t+1)
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_1(t)*h_1(t+1)
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); //g_2(t)
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h,w+1); //h_2(t+1)
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_2(t)*h_2(t+1)
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); //g_3(t)
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1); //h_3(t+1)
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_3(t)*h_3(t+1)
Dtype h_hype = (1 - c_data) * (h1_minus1 + h2_minus1 + h3_minus1); //sum(g_i(t)*h_i(t+1)) * (1-c) = (1-c) * (g_1(t)*h_1(t+1)+g_2(t)*h_2(t+1)+g_3(t)*h_3(t+1))
Dtype x_hype = c_data * x_data; //c * x
Dtype h_data = x_hype + h_hype; //(1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t+1))
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data); //set H data at point x
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
/*TOP->BOTTOM*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t-1))*/
template <typename Dtype>
__global__ void forward_one_row_top_bottom(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
//modify logic by xueyan
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); //g_(t)1
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1); //h_(t-1)1
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_(t)1 * h_(t-1)1
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); //g_(t)2
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h-1,w); //h_(t-1)2
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_(t)2 * h_(t-1)2
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); //g_(t)3
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1); //h_(t-1)3
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_(t)3 * h_(t-1)3
Dtype h_hype = (h1_minus1 + h2_minus1 + h3_minus1) * (1 - c_data); //(1-c)*(sum(g_(t)i * h_(t-1)i)) = (1-c) * (g_(t)1*h_(t-1)1+g_(t)2*h_(t-1)2+g_(t)3*h_(t-1)3)
Dtype x_hype = c_data * x_data; //c * x_(t)i
Dtype h_data = x_hype + h_hype; //(1-sum(g_(t)i)) * x_(t)i + sum(g_(t)i * h_(t-1)i)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data);
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t-1))*/
/*BOTTOM->TOP*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void forward_one_row_bottom_top(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
//modify logic by xueyan
    Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); //g_(t)1
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1); //h_(t+1)1
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_(t)1 * h_(t+1)1
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); //g_(t)2
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h+1,w); //h_(t+1)2
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_(t)2 * h_(t+1)2
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); //g_(t)3
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1); //h_(t+1)3
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_(t)3 * h_(t+1)3
Dtype h_hype = (1 - c_data) * (h1_minus1 + h2_minus1 + h3_minus1); //(1-c)*sum(g_(t)i * h_(t+1)i) = (1-c)*(g_(t)1*h_(t+1)1+g_(t)2*h_(t+1)2+g_(t)3*h_(t+1)3)
Dtype x_hype = c_data * x_data; //c * x_(t)i
Dtype h_data = x_hype + h_hype; //(1-sum(g_(t)i)) * x_(t)i + sum(g_(t)i * h_(t+1)i)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data);
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void backward_one_col_left_right(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w+1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w+1); //c
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w+1);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h,w+1);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
//H_diff[n*channels*height*width + c*height*width + h*width + w]=0;
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
    //x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
    // g_i_diff = h_diff * h_i(t-1) * (1 - c)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h,w-1);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h,w-1,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w-1,g3_diff,horizontal,reverse);
    // c_diff = h_diff * (x - g1*h1(t-1) - g2*h2(t-1) - g3*h3(t-1))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
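/* Editorial illustration, not part of the original operator source: the backward
   kernel above is the chain rule applied to the forward recurrence. For the
   left->right direction,
     dL/dh(t)   = dL/dtop(t) + sum_i dL/dh_i(t+1) * g_i(t+1) * (1 - c_i(t+1))
     dL/dx(t)   = c * dL/dh(t)
     dL/dg_i(t) = (1 - c) * h_i(t-1) * dL/dh(t)
     dL/dc(t)   = dL/dh(t) * ( x(t) - sum_i g_i(t) * h_i(t-1) )
   which is also why SCNBackward below walks the columns in the reverse order of the
   forward sweep. */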
template <typename Dtype>
__global__ void backward_one_col_right_left(const int count, int T, int num,int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w-1);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h,w-1);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w-1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
    // g_i_diff = h_diff * h_i(t+1) * (1 - c)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w+1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h,w+1);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h,w+1,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,g3_diff,horizontal,reverse);
    // c_diff = h_diff * (x - g1*h1(t+1) - g2*h2(t+1) - g3*h3(t+1))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template <typename Dtype>
__global__ void backward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
    //x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
    // g_i_diff = h_diff * h_i(t-1) * (1 - c)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h-1,w,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h-1,w+1,g3_diff,horizontal,reverse);
    // c_diff = h_diff * (x - g1*h1(t-1) - g2*h2(t-1) - g3*h3(t-1))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template <typename Dtype>
__global__ void backward_one_row_bottom_top(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w;
int temp=index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
    //x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
    // g_i_diff = h_diff * h_i(t+1) * (1 - c)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h+1,w-1,g1_diff,horizontal,reverse);
//Dtype g2_diff = h_diff * g2_idx * x_data * -1;
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h+1,w,g2_diff,horizontal,reverse);
//Dtype g3_diff = h_diff * g3_idx * x_data * -1;
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,g3_diff,horizontal,reverse);
    // c_diff = h_diff * (x - g1*h1(t+1) - g2*h2(t+1) - g3*h3(t+1))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template<typename Dtype>
inline void SCNForward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const bool horizontal_,
const bool reverse_){
/*get pointer*/
const Dtype *X = data.dptr_;
const Dtype *G1 = g1.dptr_;
const Dtype *G2 = g2.dptr_;
const Dtype *G3 = g3.dptr_;
const Dtype *C = c.dptr_;
Dtype *H = out.dptr_;
/*END get pointer*/
/*get dimension*/
//data, g1, g2, g3, out, share the same dimension
  //n_X represents the number of X
const int n_batch = data.size(0);
const int n_channel = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
/*END get dimension*/
/*set cuda system param*/
const int NUM_THREADS_BLOCK = 512; //CUDA: use 512 threads per block
const int NUM_BLOCKS_GRID = kMaxGridDim; //CUDA: use largest blocks num per grid
/*END set cuda system param*/
/*allocate kernel*/
if(horizontal_ && !reverse_){ // left to right
/*logic within this block:
*1. calculate total number of execution units that run in parallel
*2. calculate block and grid dimension
*3. check block/grid dimension, get stream
    *4. call cuda kernel function*/
const int n_operation_parallel = height * n_channel * n_batch;
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = 0; current_col < width; current_col++){ //iterate through the column
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN left->right forward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( forward_one_col_left_right<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else if(horizontal_ && reverse_){ // right to left
/*logic same as previous*/
const int n_operation_parallel = height * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = width - 1; current_col >= 0; current_col--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN right->left forward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( forward_one_col_right_left<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else if(!horizontal_ && !reverse_){ // top to bottom
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = 0; current_row < height; current_row++){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN top->bottom forward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( forward_one_row_top_bottom<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else{ //bottom to top
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = height - 1; current_row >= 0; current_row--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN bottom->top forward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( forward_one_row_bottom_top<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}
/*END allocate kernel*/
}//end SCNForward
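/* Editorial illustration, not part of the original operator source: a worked example
   of the launch arithmetic above, with illustrative sizes only (n_batch=2,
   n_channel=8, height=240, width=320, horizontal pass, NUM_THREADS_BLOCK=512).
   Each column step must cover height*n_channel*n_batch = 240*8*2 = 3840 pixels, so
   n_blocks_need = (3840-1)/512 + 1 = 8 and n_grids_need = (8-1)/kMaxGridDim + 1 = 1.
   The launch itself always uses a kMaxGridDim x n_grids_need grid; surplus threads
   simply fail the "index < count" test inside the kernels' grid-stride loops. */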
template<typename Dtype>
inline void SCNBackward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data_diff,
const Tensor<gpu, 4, Dtype> &g1_diff,
const Tensor<gpu, 4, Dtype> &g2_diff,
const Tensor<gpu, 4, Dtype> &g3_diff,
const Tensor<gpu, 4, Dtype> &c_diff,
const Tensor<gpu, 4, Dtype> &out_diff,
const bool horizontal_,
const bool reverse_){
/*get pointer*/
const Dtype *X = data.dptr_;
const Dtype *G1 = g1.dptr_;
const Dtype *G2 = g2.dptr_;
const Dtype *G3 = g3.dptr_;
const Dtype *C = c.dptr_;
const Dtype *H = out.dptr_;
Dtype *X_diff = data_diff.dptr_;
Dtype *G1_diff = g1_diff.dptr_;
Dtype *G2_diff = g2_diff.dptr_;
Dtype *G3_diff = g3_diff.dptr_;
Dtype *C_diff = c_diff.dptr_;
Dtype *H_diff = out_diff.dptr_;
/*END get pointer*/
/*get dimension*/
//data, g1, g2, g3, out, share the same dimension
  //n_X represents the number of X
const int n_batch = data.size(0);
const int n_channel = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
/*END get dimension*/
/*set cuda system param*/
const int NUM_THREADS_BLOCK = 512; //CUDA: use 512 threads per block
const int NUM_BLOCKS_GRID = kMaxGridDim; //CUDA: use largest blocks num per grid
/*END set cuda system param*/
/*allocate kernel*/
if(horizontal_ && !reverse_){ // left to right
/*logic within this block:
*1. calculate total number of execution units that run in parallel
*2. calculate block and grid dimension
*3. check block/grid dimension, get stream
    *4. call cuda kernel function*/
const int n_operation_parallel = height * n_channel * n_batch;
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = width - 1; current_col >= 0; current_col--){ //iterate through the column
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN left->right backward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( backward_one_col_left_right<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else if(horizontal_ && reverse_){ // right to left
/*logic same as previous*/
const int n_operation_parallel = height * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = 0; current_col < width; current_col++){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN right->left backward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( backward_one_col_right_left<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else if(!horizontal_ && !reverse_){ // top to bottom
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = height - 1; current_row >= 0; current_row--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN top->bottom backward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( backward_one_row_top_bottom<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else{ //bottom to top
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
    for(int current_row = 0; current_row < height; current_row++){ // iterate over rows
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN bottom->top backward"); //check whether dimGrid or dimBlock is out of range
hipStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
hipLaunchKernelGGL(( backward_one_row_bottom_top<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}
/*END allocate kernel*/
}//end SCNBackward
} //namespace cuda
template<typename Dtype>
inline void SCNForward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const bool horizontal,
const bool reverse){
cuda::SCNForward(data, g1, g2, g3, c, out, horizontal, reverse);
}
template<typename Dtype>
inline void SCNBackward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data_diff,
const Tensor<gpu, 4, Dtype> &g1_diff,
const Tensor<gpu, 4, Dtype> &g2_diff,
const Tensor<gpu, 4, Dtype> &g3_diff,
const Tensor<gpu, 4, Dtype> &c_diff,
const Tensor<gpu, 4, Dtype> &out_diff,
const bool horizontal,
const bool reverse){
cuda::SCNBackward(data, g1, g2, g3, c, out, data_diff, g1_diff, g2_diff, g3_diff, c_diff, out_diff, horizontal, reverse);
}
} //namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ScnParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ScnOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
| 4c3ac69c9c45250413cb3fd1ee4867a519d38776.cu | /*
* Written by Xueyan Zou @TuSimple Algorithm Intern
*/
#include "./spatial-completion-inl.h"
#include <assert.h>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <algorithm>
#include <vector>
namespace mshadow {
namespace cuda {
__device__ void get_gate_idx_xy(int h1, int w1, int h2, int w2, int* out, bool horizontal, bool reverse)
{
if(horizontal && ! reverse) // left -> right
{
if(w1>w2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(horizontal && reverse) // right -> left
{
if(w1<w2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(!horizontal && !reverse) // top -> bottom
{
if(h1>h2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
if(!horizontal && reverse) // bottom -> top
{
if(h1<h2)
{
out[0]=h1;
out[1]=w1;
}
else
{
out[0]=h2;
out[1]=w2;
}
}
}
template <typename Dtype>
__device__ void set_gate_xy(Dtype* data, int num, int channels, int height, int width, int n, int c, int h1, int w1, int h2, int w2, Dtype v, bool horizontal, bool reverse)
{
if(h1<0 || h1 >=height) //redundant
return ; //redundant
if(w1<0 || w1 >= width) //redundant
return ; //redundant
if(h2<0 || h2 >=height) //redundant
return ; //redundant
if(w2<0 || w2 >= width) //redundant
return ; //redundant
int idx[2];
get_gate_idx_xy(h1, w1, h2, w2, idx, horizontal, reverse);
int h = idx[0];
int w = idx[1];
data[n*channels*height*width + c*height*width + h*width + w] = v;
}
template <typename Dtype> //this function is modified by xueyan
__device__ Dtype get_gate_xy(Dtype * data, int num, int channels, int height, int width, int n, int c, int h1, int w1, int h2, int w2, bool horizontal, bool reverse){
//handle index out of range
if(h1<0 || h1 >=height) //redundant
return 0; //redundant
if(w1<0 || w1 >= width) //redundant
return 0; //redundant
if(h2<0 || h2 >=height)
return 0;
if(w2<0 || w2 >= width)
return 0;
int idx[2];
get_gate_idx_xy(h1, w1, h2, w2, idx, horizontal, reverse);
int h = idx[0];
int w = idx[1];
return data[n*channels*height*width + c*height*width + h*width + w];
}
template <typename Dtype>
__device__ void set_data_xy(Dtype * data, int num, int channels,int height, int width,int n,int c,int h,int w, Dtype v)
{
//modify by xueyan, assert error.
if(h<0 || h >=height)
assert(0);
if(w<0 || w >= width)
assert(0);
data[n*channels*height*width + c*height*width + h*width + w] = v;
}
template <typename Dtype>
__device__ Dtype get_data_xy(Dtype *data, int num, int channels, int height, int width, int n, int c, int h, int w){
//handle index out of range
if(h<0 || h >=height)
return 0;
if(w<0 || w >= width)
return 0;
//spatial-propagation-inl.h:82 -> default configuration of dim is (batch, channel, height, width)
return data[n*channels*height*width + c*height*width + h*width + w];
}
/*LEFT->RIGHT*/
/*h(t) = (1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t-1))*/
template <typename Dtype>
__global__ void forward_one_col_left_right(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels; //in order to calculate batch size
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//modify logic by xueyan
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); //g_1(t)
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1); //h_1(t-1)
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_1(t)*h_1(t-1)
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); //g_2(t)
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h,w-1); //h_2(t-1)
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_2(t)*h_2(t-1)
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); //g_3(t)
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1); //h_3(t-1)
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_3(t)*h_3(t-1)
Dtype h_hype = (h1_minus1 + h2_minus1 + h3_minus1) * (1 - c_data); //sum(g_i(t)*h_i(t-1)) * (1-c) = (g_1(t)*h_1(t-1)+g_2(t)*h_2(t-1)+g_3(t)*h_3(t-1))*(1-c)
Dtype x_hype = c_data * x_data; //c * x
Dtype h_data = x_hype + h_hype; //c * x_i(t) + sum(g_i(t) * h_i(t-1))*(1-c)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data); //set H data at point x
}
}
/*END h(t) = (1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t-1))*/
/*RIGHT->LEFT*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void forward_one_col_right_left(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//modify logic by xueyan
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); //g_1(t)
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1); //h_1(t+1)
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_1(t)*h_1(t+1)
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); //g_2(t)
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h,w+1); //h_2(t+1)
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_2(t)*h_2(t+1)
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); //g_3(t)
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1); //h_3(t+1)
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_3(t)*h_3(t+1)
Dtype h_hype = (1 - c_data) * (h1_minus1 + h2_minus1 + h3_minus1); //sum(g_i(t)*h_i(t+1)) * (1-c) = (1-c) * (g_1(t)*h_1(t+1)+g_2(t)*h_2(t+1)+g_3(t)*h_3(t+1))
Dtype x_hype = c_data * x_data; //c * x
Dtype h_data = x_hype + h_hype; //(1-sum(g_i(t))) * x_i(t) + sum(g_i(t) * h_i(t+1))
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data); //set H data at point x
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
/*TOP->BOTTOM*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void forward_one_row_top_bottom(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
//modify logic by xueyan
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); //g_(t)1
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1); //h_(t-1)1
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_(t)1 * h_(t-1)1
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); //g_(t)2
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h-1,w); //h_(t-1)2
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_(t)2 * h_(t-1)2
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); //g_(t)3
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1); //h_(t-1)3
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_(t)3 * h_(t-1)3
Dtype h_hype = (h1_minus1 + h2_minus1 + h3_minus1) * (1 - c_data); //(1-c)*(sum(g_(t)i * h_(t-1)i)) = (1-c) * (g_(t)1*h_(t-1)1+g_(t)2*h_(t-1)2+g_(t)3*h_(t-1)3)
Dtype x_hype = c_data * x_data; //c * x_(t)i
Dtype h_data = x_hype + h_hype; //c * x_(t)i + (1-c) * sum(g_(t)i * h_(t-1)i)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data);
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t-1))*/
/*BOTTOM->TOP*/
/*h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
template <typename Dtype>
__global__ void forward_one_row_bottom_top(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, Dtype* H, bool horizontal, bool reverse){
//count -> total number of threads; T -> current_row/current_column; num -> total num batch
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp = index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
//modify logic by xueyan
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w); //x
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype g_data_1 = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); //g_(t)1
Dtype h_minus1_data_1 = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1); //h_(t+1)1
Dtype h1_minus1 = g_data_1 * h_minus1_data_1; //g_(t)1 * h_(t+1)1
Dtype g_data_2 = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); //g_(t)2
Dtype h_minus1_data_2 = get_data_xy(H,num,channels,height,width,n,c,h+1,w); //h_(t+1)2
Dtype h2_minus1 = g_data_2 * h_minus1_data_2; //g_(t)2 * h_(t+1)2
Dtype g_data_3 = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); //g_(t)3
Dtype h_minus1_data_3 = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1); //h_(t+1)3
Dtype h3_minus1 = g_data_3 * h_minus1_data_3; //g_(t)3 * h_(t+1)3
Dtype h_hype = (1 - c_data) * (h1_minus1 + h2_minus1 + h3_minus1); //(1-c)*sum(g_(t)i * h_(t+1)i) = (1-c)*(g_(t)1*h_(t+1)1+g_(t)2*h_(t+1)2+g_(t)3*h_(t+1)3)
Dtype x_hype = c_data * x_data; //c * x_(t)i
Dtype h_data = x_hype + h_hype; //c * x_(t)i + (1-c) * sum(g_(t)i * h_(t+1)i)
set_data_xy(H,num,channels,height,width,n,c,h,w,h_data);
}
}
/*END h(t) = c * x_i(t) + (1-c) * sum(g_i(t) * h_i(t+1))*/
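/*The four backward kernels below differentiate the forward recurrence
    h(t) = c * x(t) + (1-c) * sum_i(g_i(t) * h_i(t-1))
  (t-1 becomes t+1 for the two reversed directions). Reading the gradients off the code:
    x(t)_diff   = c * h(t)_diff
    g_i(t)_diff = (1-c) * h_i(t-1) * h(t)_diff
    c(t)_diff   = h(t)_diff * (x(t) - sum_i(g_i(t) * h_i(t-1)))
  and h(t)_diff itself is the incoming top diff plus the contributions
  (1-c_next) * g_next * h_next_diff accumulated from the three positions in the
  next slice that consumed h(t) during the forward pass.*/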
template <typename Dtype>
__global__ void backward_one_col_left_right(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w+1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w+1); //c
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w+1);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h,w+1);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
//H_diff[n*channels*height*width + c*height*width + h*width + w]=0;
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
//x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
// g_i(t)_diff = h(t)_diff * (1-c) * h_i(t-1)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h,w-1);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h,w-1,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w-1,g3_diff,horizontal,reverse);
// c_diff = h_diff * (x - sum(g_i * h_i(t-1)))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template <typename Dtype>
__global__ void backward_one_col_right_left(const int count, int T, int num,int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int hc_count = height * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
w = T;
n = temp / hc_count;
temp = temp % hc_count;
c = temp / height;
temp = temp % height;
h = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w-1);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h,w-1);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w-1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
// g_i(t)_diff = h(t)_diff * (1-c) * h_i(t+1)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w+1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h,w+1);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h,w+1,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,g3_diff,horizontal,reverse);
// c_diff = h_diff * (x - sum(g_i * h_i(t+1)))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template <typename Dtype>
__global__ void backward_one_row_top_bottom(const int count, int T, int num,int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w; //w->current_col; n->current_batch; c->current_channel;
int temp=index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
//locate the pixel as (n,c,h,w);
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h+1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h+1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
//x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
// g_i(t)_diff = h(t)_diff * (1-c) * h_i(t-1)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,g1_diff,horizontal,reverse);
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h-1,w,g2_diff,horizontal,reverse);
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h-1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h-1,w+1,g3_diff,horizontal,reverse);
// c_diff = h_diff * (x - sum(g_i * h_i(t-1)))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template <typename Dtype>
__global__ void backward_one_row_bottom_top(const int count, int T, int num, int channels, int height, int width, const Dtype* X, const Dtype* G1, const Dtype* G2, const Dtype* G3, const Dtype* C, const Dtype* H, Dtype* X_diff, Dtype* G1_diff, Dtype* G2_diff, Dtype* G3_diff, Dtype* C_diff, Dtype* H_diff, bool horizontal, bool reverse){
for(int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y){
//CUDA kernel loop, index trace the current thread
int wc_count = width * channels;
int n,c,h,w;
int temp=index;
h = T;
n = temp / wc_count;
temp = temp % wc_count;
c = temp / width;
temp = temp % width;
w = temp;
Dtype x_data = get_data_xy(X,num,channels,height,width,n,c,h,w);
Dtype c_data = get_data_xy(C,num,channels,height,width,n,c,h,w); //c
//h(t)_diff = top(t)_diff
Dtype h_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h,w);
//h(t)_diff += h(t+1)_diff * g(t+1) if t<T
Dtype add1_h3_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse);
Dtype add1_c3_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w-1);
Dtype add1_h2_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w);
Dtype add1_g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse);
Dtype add1_c2_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w);
Dtype add1_h1_diff = get_data_xy(H_diff,num,channels,height,width,n,c,h-1,w+1);
Dtype add1_g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse);
Dtype add1_c1_data = get_data_xy(C,num,channels,height,width,n,c,h-1,w+1);
h_diff = h_diff + add1_h3_diff * add1_g3_data * (1 - add1_c3_data) + add1_h2_diff * add1_g2_data * (1 - add1_c2_data) + add1_h1_diff * add1_g1_data * (1 - add1_c1_data);
set_data_xy(H_diff,num,channels,height,width,n,c,h,w,h_diff);
//x(t)_diff = c * h(t)_diff
Dtype g1_data = get_gate_xy(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse);
Dtype g2_data = get_gate_xy(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse);
Dtype g3_data = get_gate_xy(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse);
Dtype x_diff = c_data * h_diff;
set_data_xy(X_diff,num,channels,height,width,n,c,h,w,x_diff);
// g_i(t)_diff = h(t)_diff * (1-c) * h_i(t+1)
Dtype h1_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w-1);
Dtype g1_diff = h_diff * h1_minus1_data * (1 - c_data);
set_gate_xy(G1_diff,num,channels,height,width,n,c,h,w,h+1,w-1,g1_diff,horizontal,reverse);
//Dtype g2_diff = h_diff * g2_idx * x_data * -1;
Dtype h2_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w);
Dtype g2_diff = h_diff * h2_minus1_data * (1 - c_data);
set_gate_xy(G2_diff,num,channels,height,width,n,c,h,w,h+1,w,g2_diff,horizontal,reverse);
//Dtype g3_diff = h_diff * g3_idx * x_data * -1;
Dtype h3_minus1_data = get_data_xy(H,num,channels,height,width,n,c,h+1,w+1);
Dtype g3_diff = h_diff * h3_minus1_data * (1 - c_data);
set_gate_xy(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,g3_diff,horizontal,reverse);
// c_diff = h_diff * (x - sum(g_i * h_i(t+1)))
Dtype c_diff = h_diff * (x_data - h1_minus1_data*g1_data - h2_minus1_data*g2_data - h3_minus1_data*g3_data);
set_data_xy(C_diff,num,channels,height,width,n,c,h,w,c_diff);
}
}
template<typename Dtype>
inline void SCNForward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const bool horizontal_,
const bool reverse_){
/*get pointer*/
const Dtype *X = data.dptr_;
const Dtype *G1 = g1.dptr_;
const Dtype *G2 = g2.dptr_;
const Dtype *G3 = g3.dptr_;
const Dtype *C = c.dptr_;
Dtype *H = out.dptr_;
/*END get pointer*/
/*get dimension*/
//data, g1, g2, g3, out, share the same dimension
//n_X represent number of X
const int n_batch = data.size(0);
const int n_channel = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
/*END get dimension*/
/*set cuda system param*/
const int NUM_THREADS_BLOCK = 512; //CUDA: use 512 threads per block
const int NUM_BLOCKS_GRID = kMaxGridDim; //CUDA: use largest blocks num per grid
/*END set cuda system param*/
/*allocate kernel*/
if(horizontal_ && !reverse_){ // left to right
/*logic within this block:
*1. calculate total number of execution units that run in parallel
*2. calculate block and grid dimension
*3. check block/grid dimension, get stream
*4. call cuda kernel function*/
const int n_operation_parallel = height * n_channel * n_batch;
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
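//e.g. (hypothetical sizes, for illustration) n_batch=4, n_channel=32, height=256:
//  n_operation_parallel = 4*32*256 = 32768, n_blocks_need = 32768/512 = 64,
//  n_grids_need = 1 whenever kMaxGridDim >= 64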
for(int current_col = 0; current_col < width; current_col++){ //iterate through the column
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN left->right forward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
forward_one_col_left_right<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else if(horizontal_ && reverse_){ // right to left
/*logic same as previous*/
const int n_operation_parallel = height * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = width - 1; current_col >= 0; current_col--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN right->left forward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
forward_one_col_right_left<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else if(!horizontal_ && !reverse_){ // top to bottom
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = 0; current_row < height; current_row++){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN top->bottom forward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
forward_one_row_top_bottom<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}else{ //bottom to top
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = height - 1; current_row >= 0; current_row--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN bottom->top forward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
forward_one_row_bottom_top<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, horizontal_, reverse_);
}
}
/*END allocate kernel*/
}//end SCNForward
template<typename Dtype>
inline void SCNBackward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data_diff,
const Tensor<gpu, 4, Dtype> &g1_diff,
const Tensor<gpu, 4, Dtype> &g2_diff,
const Tensor<gpu, 4, Dtype> &g3_diff,
const Tensor<gpu, 4, Dtype> &c_diff,
const Tensor<gpu, 4, Dtype> &out_diff,
const bool horizontal_,
const bool reverse_){
/*get pointer*/
const Dtype *X = data.dptr_;
const Dtype *G1 = g1.dptr_;
const Dtype *G2 = g2.dptr_;
const Dtype *G3 = g3.dptr_;
const Dtype *C = c.dptr_;
const Dtype *H = out.dptr_;
Dtype *X_diff = data_diff.dptr_;
Dtype *G1_diff = g1_diff.dptr_;
Dtype *G2_diff = g2_diff.dptr_;
Dtype *G3_diff = g3_diff.dptr_;
Dtype *C_diff = c_diff.dptr_;
Dtype *H_diff = out_diff.dptr_;
/*END get pointer*/
/*get dimension*/
//data, g1, g2, g3, out, share the same dimension
//n_X represent number of X
const int n_batch = data.size(0);
const int n_channel = data.size(1);
const int height = data.size(2);
const int width = data.size(3);
/*END get dimension*/
/*set cuda system param*/
const int NUM_THREADS_BLOCK = 512; //CUDA: use 512 threads per block
const int NUM_BLOCKS_GRID = kMaxGridDim; //CUDA: use largest blocks num per grid
/*END set cuda system param*/
/*allocate kernel*/
if(horizontal_ && !reverse_){ // left to right
/*logic within this block:
*1. calculate total number of execution units that run in parallel
*2. calculate block and grid dimension
*3. check block/grid dimension, get stream
*4. call cuda kernel function*/
const int n_operation_parallel = height * n_channel * n_batch;
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = width - 1; current_col >= 0; current_col--){ //iterate through the column
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN left->right backward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
backward_one_col_left_right<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else if(horizontal_ && reverse_){ // right to left
/*logic same as previous*/
const int n_operation_parallel = height * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_col = 0; current_col < width; current_col++){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN right->left backward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
backward_one_col_right_left<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_col, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else if(!horizontal_ && !reverse_){ // top to bottom
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = height - 1; current_row >= 0; current_row--){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN top->bottom backward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
backward_one_row_top_bottom<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}else{ //bottom to top
/*logic same as previous*/
const int n_operation_parallel = width * n_channel * n_batch; //total number of execution units that run in parallel
const int n_blocks_need = ((n_operation_parallel - 1) / NUM_THREADS_BLOCK) + 1;
const int n_grids_need = ((n_blocks_need - 1) / NUM_BLOCKS_GRID) + 1;
for(int current_row = 0; current_row < height; current_row++){
dim3 dimGrid(NUM_BLOCKS_GRID, n_grids_need);
dim3 dimBlock(NUM_THREADS_BLOCK);
CheckLaunchParam(dimGrid, dimBlock, "SCN bottom->top backward"); //check whether dimGrid or dimBlock is out of range
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); //??not sure where to find the definition of stream_
backward_one_row_bottom_top<Dtype><<<dimGrid, dimBlock, 0, stream>>>(n_operation_parallel, current_row, n_batch, n_channel, height, width, X, G1, G2, G3, C, H, X_diff, G1_diff, G2_diff, G3_diff, C_diff, H_diff, horizontal_, reverse_);
}
}
/*END allocate kernel*/
}//end SCNBackward
} //namespace cuda
template<typename Dtype>
inline void SCNForward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const bool horizontal,
const bool reverse){
cuda::SCNForward(data, g1, g2, g3, c, out, horizontal, reverse);
}
template<typename Dtype>
inline void SCNBackward(const Tensor<gpu, 4, Dtype> &data,
const Tensor<gpu, 4, Dtype> &g1,
const Tensor<gpu, 4, Dtype> &g2,
const Tensor<gpu, 4, Dtype> &g3,
const Tensor<gpu, 4, Dtype> &c,
const Tensor<gpu, 4, Dtype> &out,
const Tensor<gpu, 4, Dtype> &data_diff,
const Tensor<gpu, 4, Dtype> &g1_diff,
const Tensor<gpu, 4, Dtype> &g2_diff,
const Tensor<gpu, 4, Dtype> &g3_diff,
const Tensor<gpu, 4, Dtype> &c_diff,
const Tensor<gpu, 4, Dtype> &out_diff,
const bool horizontal,
const bool reverse){
cuda::SCNBackward(data, g1, g2, g3, c, out, data_diff, g1_diff, g2_diff, g3_diff, c_diff, out_diff, horizontal, reverse);
}
} //namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(ScnParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new ScnOp<gpu, DType>(param);
})
return op;
}
} // namespace op
} // namespace mxnet
|
8d5bf8b96ca3d1c40264d766fabe350b6df78a74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// zeros out the part of a block above the diagonal
// sets ones on the diagonal (kernel by V.Volkov)
extern "C" {
__global__ void enforceLU( float *matrix, int lda )
{
int i = threadIdx.x;
int j = blockIdx.x;
if( i <= j )
matrix[i + j*lda] = (i == j) ? 1 : 0;
}
}
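// Usage sketch (illustrative assumption, not from the original source): for an
// nb x nb diagonal block starting at d_A + k + k*lda (column-major), launch one
// block per column and one thread per row:
//   hipLaunchKernelGGL(enforceLU, dim3(nb), dim3(nb), 0, 0, d_A + k + k*lda, lda);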
// zeros out the whole part of matrix above the diagonal (not just a block)
extern "C" {
__global__ void zerosU(int m, int n, float *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if (i < j)
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
}
// zeros out the whole part of matrix below the diagonal
extern "C" {
__global__ void zerosL(int m, int n, float *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if( i > j )
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
} | 8d5bf8b96ca3d1c40264d766fabe350b6df78a74.cu | // zeros out the part of a block above the diagonal
// sets ones on the diagonal (kernel by V.Volkov)
extern "C" {
__global__ void enforceLU( float *matrix, int lda )
{
int i = threadIdx.x;
int j = blockIdx.x;
if( i <= j )
matrix[i + j*lda] = (i == j) ? 1 : 0;
}
}
// zeros out the whole part of matrix above the diagonal (not just a block)
extern "C" {
__global__ void zerosU(int m, int n, float *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if (i < j)
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
}
// zeros out the whole part of matrix below the diagonal
extern "C" {
__global__ void zerosL(int m, int n, float *matrix, int lda, int incl)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= m || j >= n) return;
if( i > j )
matrix[i + j*lda] = 0;
else if (i == j && incl)
matrix[i + j*lda] = 0;
}
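// Usage sketch (illustrative assumption, not from the original source): cover the
// m x n matrix with 2-D thread blocks, e.g.
//   dim3 block(16, 16);
//   dim3 grid((m + block.x - 1) / block.x, (n + block.y - 1) / block.y);
//   zerosL<<<grid, block>>>(m, n, d_A, lda, 0); // incl=0 keeps the diagonal; zerosU is launched the same way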
} |
04a56885977059d5f71fde00b74548447d540768.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "upload.hh"
void gpuAssert(hipError_t code, const char *file, int line)
{
if (code == hipSuccess)
return;
fprintf(stderr, "CUDA: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
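/* upload_kd_node flattens the host-side kd-tree into the nodes[] array while
 * precomputing, still on the host, the device addresses its children and
 * triangle ranges will have after upload (nodes_gpu + child_index and
 * triangles_gpu + offset). upload_kd_tree below then uploads the patched array
 * with a single memcpy, so no per-node pointer fix-up is needed on the device. */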
static size_t upload_kd_node(KdNodeGpu* nodes, KdNodeGpu* nodes_gpu,
std::vector<Triangle>& triangles, Triangle* triangles_gpu,
const KdTree::childPtr& kd_node, std::size_t& idx)
{
if (!kd_node)
return 0;
size_t cur_idx = idx;
KdNodeGpu &node = nodes[idx++];
size_t left_idx = upload_kd_node(nodes, nodes_gpu, triangles,
triangles_gpu, kd_node->left, idx);
if (left_idx)
node.left = nodes_gpu + left_idx;
else
node.left = nullptr;
size_t right_idx = upload_kd_node(nodes, nodes_gpu, triangles,
triangles_gpu, kd_node->right, idx);
if (right_idx)
node.right = nodes_gpu + right_idx;
else
node.right = nullptr;
memcpy(node.box, kd_node->box, sizeof(node.box));
size_t len = kd_node->end - kd_node->beg;
size_t offset = &(*kd_node->beg) - triangles.data();
node.beg = triangles_gpu + offset;
node.end = node.beg + len;
return cur_idx;
}
KdNodeGpu* upload_kd_tree(const KdTree& kd_tree, std::vector<Triangle>& triangles)
{
std::vector<KdNodeGpu> nodes(kd_tree.nodes_count_);
KdNodeGpu* nodes_gpu;
cudaCheckError(hipMalloc(&nodes_gpu, sizeof(*nodes_gpu) * nodes.size()));
Triangle* triangles_gpu;
cudaCheckError(hipMalloc(&triangles_gpu, sizeof(*triangles_gpu) * triangles.size()));
size_t idx = 0;
upload_kd_node(nodes.data(), nodes_gpu, triangles,
triangles_gpu,
kd_tree.root_,
idx);
cudaCheckError(hipMemcpy(nodes_gpu, nodes.data(), sizeof(*nodes_gpu) * nodes.size(), hipMemcpyHostToDevice));
cudaCheckError(hipMemcpy(triangles_gpu, triangles.data(), sizeof(*triangles_gpu) * triangles.size(), hipMemcpyHostToDevice));
return nodes_gpu;
}
| 04a56885977059d5f71fde00b74548447d540768.cu | #include <stdio.h>
#include "upload.hh"
void gpuAssert(cudaError_t code, const char *file, int line)
{
if (code == cudaSuccess)
return;
fprintf(stderr, "CUDA: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
static size_t upload_kd_node(KdNodeGpu* nodes, KdNodeGpu* nodes_gpu,
std::vector<Triangle>& triangles, Triangle* triangles_gpu,
const KdTree::childPtr& kd_node, std::size_t& idx)
{
if (!kd_node)
return 0;
size_t cur_idx = idx;
KdNodeGpu &node = nodes[idx++];
size_t left_idx = upload_kd_node(nodes, nodes_gpu, triangles,
triangles_gpu, kd_node->left, idx);
if (left_idx)
node.left = nodes_gpu + left_idx;
else
node.left = nullptr;
size_t right_idx = upload_kd_node(nodes, nodes_gpu, triangles,
triangles_gpu, kd_node->right, idx);
if (right_idx)
node.right = nodes_gpu + right_idx;
else
node.right = nullptr;
memcpy(node.box, kd_node->box, sizeof(node.box));
size_t len = kd_node->end - kd_node->beg;
size_t offset = &(*kd_node->beg) - triangles.data();
node.beg = triangles_gpu + offset;
node.end = node.beg + len;
return cur_idx;
}
KdNodeGpu* upload_kd_tree(const KdTree& kd_tree, std::vector<Triangle>& triangles)
{
std::vector<KdNodeGpu> nodes(kd_tree.nodes_count_);
KdNodeGpu* nodes_gpu;
cudaCheckError(cudaMalloc(&nodes_gpu, sizeof(*nodes_gpu) * nodes.size()));
Triangle* triangles_gpu;
cudaCheckError(cudaMalloc(&triangles_gpu, sizeof(*triangles_gpu) * triangles.size()));
size_t idx = 0;
upload_kd_node(nodes.data(), nodes_gpu, triangles,
triangles_gpu,
kd_tree.root_,
idx);
cudaCheckError(cudaMemcpy(nodes_gpu, nodes.data(), sizeof(*nodes_gpu) * nodes.size(), cudaMemcpyHostToDevice));
cudaCheckError(cudaMemcpy(triangles_gpu, triangles.data(), sizeof(*triangles_gpu) * triangles.size(), cudaMemcpyHostToDevice));
return nodes_gpu;
}
|
153f1c1751aa7c177c40bd8ea3c3c3cdb5048df9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
#include "float.h"
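/* Host-side reference sketch (added for illustration only; the function name and
 * its signature are not part of the original assignment code). It mirrors the
 * min/max -> histogram -> exclusive-scan pipeline implemented by the kernels
 * below, assuming n > 0. Note the kernels in this file produce an exclusive scan
 * (which would be [0 4 11] for the [4 7 3] histogram in the example above),
 * while the header example shows the inclusive form. */
#include <vector>
#include <algorithm>
#include <cstddef>
static void cpu_cdf_reference(const float* lum, std::size_t n, std::size_t numBins,
                              float& minLum, float& maxLum,
                              std::vector<unsigned int>& cdf)
{
  minLum = *std::min_element(lum, lum + n);
  maxLum = *std::max_element(lum, lum + n);
  const float range = (maxLum > minLum) ? (maxLum - minLum) : 1.f;
  std::vector<unsigned int> histo(numBins, 0);
  for (std::size_t i = 0; i < n; ++i) {
    std::size_t bin = std::min(numBins - 1,
                               (std::size_t)((lum[i] - minLum) / range * numBins));
    ++histo[bin];
  }
  cdf.assign(numBins, 0);
  for (std::size_t b = 1; b < numBins; ++b)  // exclusive prefix sum
    cdf[b] = cdf[b - 1] + histo[b - 1];
}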
__global__ void blelloch_scan_kernel(unsigned int *d_inout, const size_t num_bins)
{
size_t tid = threadIdx.x;
int i = 0;
for(i = 1; i < num_bins; i <<= 1){
if((tid + 1) % (i * 2) == 0){
d_inout[tid] += d_inout[tid - i];
}
__syncthreads();
}
if(tid == num_bins - 1){
d_inout[tid] = 0;
}
for(i = num_bins / 2; i >= 1; i /= 2){
if((tid + 1) % (i * 2) == 0){
unsigned int tmp = d_inout[tid - i];
d_inout[tid - i] = d_inout[tid];
d_inout[tid] += tmp;
}
__syncthreads();
}
}
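// The kernel above is a single-block, in-place Blelloch (work-efficient)
// exclusive scan: the first loop is the up-sweep/reduce phase, the last element
// is then cleared, and the second loop is the down-sweep. It assumes num_bins is
// a power of two and that it is launched with exactly num_bins threads in one
// block, as done below.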
__global__ void hillis_scan_kernel(unsigned int *d_inout, const size_t num_bins){
size_t tid = threadIdx.x;
extern __shared__ unsigned int sdata2[];
sdata2[tid] = d_inout[tid];
__syncthreads();
int i = 0;
for(i = 1; i < num_bins; i <<= 1){
//snapshot the partner value before any thread overwrites it this step
//(avoids the read/write race of the original in-place atomicAdd version)
unsigned int add = (tid >= i) ? sdata2[tid - i] : 0;
__syncthreads();
sdata2[tid] += add;
__syncthreads();
}
// ---Inclusive to exclusive
if(tid == 0){
d_inout[tid] = 0;
}
else{
d_inout[tid] = sdata2[tid - 1];
}
}
__global__ void create_histogram_kernel(const float *d_in, unsigned int *d_out, const float minval,
const float valrange, const size_t num_bins){
size_t tid = threadIdx.x;
size_t abs_idx = blockIdx.x * blockDim.x + tid;
size_t bin = (d_in[abs_idx] - minval) / valrange * num_bins;
if(bin == num_bins){
// --- Out of range case
bin--;
}
atomicAdd(&d_out[bin], 1);
}
__global__ void shmem_reduce_kernel(const float *d_in, float *d_out, bool is_min_op){
extern __shared__ float sdata1[];
size_t tid = threadIdx.x;
size_t abs_idx = blockIdx.x * blockDim.x + tid;
sdata1[tid] = d_in[abs_idx];
// --- Before going further, we have to make sure that all the shared memory loads have been completed
__syncthreads();
// --- Reduction in shared memory. Only half of the threads contribute to reduction
size_t s = 0;
for(s = (blockDim.x >> 1); s > 0; s >>= 1){
if(tid < s){
if(is_min_op){
sdata1[tid] = fminf(sdata1[tid], sdata1[tid + s]);
}
else{
sdata1[tid] = fmaxf(sdata1[tid], sdata1[tid + s]);
}
}
// --- Make sure all min op at one stage are done
__syncthreads();
}
// --- Only thread 0 writes result for this block back to global mem
if(tid == 0){
d_out[blockIdx.x] = sdata1[0];
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/*
* 1) find the minimum and maximum value in the input logLuminance channel
* store in min_logLum and max_logLum
*/
const size_t numSize = numRows * numCols;
const size_t tnum = (1 << 10);
size_t bnum = numSize >> 10;
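// Note: this sizing assumes numRows*numCols is a multiple of 1024 (otherwise the
// tail elements are ignored) and that bnum is a power of two no larger than 1024,
// since the second reduction stage below runs as a single block of bnum threads.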
float *d_tempbuf;
checkCudaErrors(hipMalloc((void **)&d_tempbuf, bnum * sizeof(float)));
float *d_min;
float *d_max;
checkCudaErrors(hipMalloc((void **)&d_min, 1 * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_max, 1 * sizeof(float)));hipLaunchKernelGGL((
shmem_reduce_kernel), dim3(bnum), dim3(tnum), tnum * sizeof(float), 0, d_logLuminance, d_tempbuf, true);
hipDeviceSynchronize();hipLaunchKernelGGL((
shmem_reduce_kernel), dim3(1), dim3(bnum), bnum * sizeof(float), 0, d_tempbuf, d_min, true);
hipDeviceSynchronize();
checkCudaErrors(hipMemset(d_tempbuf, 0, bnum * sizeof(float)));hipLaunchKernelGGL((
shmem_reduce_kernel), dim3(bnum), dim3(tnum), tnum * sizeof(float), 0, d_logLuminance, d_tempbuf, false);
hipDeviceSynchronize();hipLaunchKernelGGL((
shmem_reduce_kernel), dim3(1), dim3(bnum), bnum * sizeof(float), 0, d_tempbuf, d_max, false);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(&min_logLum, d_min, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_logLum, d_max, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_tempbuf));
checkCudaErrors(hipFree(d_min));
checkCudaErrors(hipFree(d_max));
/*
* 2) subtract them to find the range
*/
float range_logLum = max_logLum - min_logLum;
/*
* 3) generate a histogram of all the values in the logLuminance channel using
* the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
checkCudaErrors(hipMemset(d_cdf, 0, sizeof(unsigned int)* numBins));hipLaunchKernelGGL((
create_histogram_kernel), dim3(bnum), dim3(tnum), 0, 0, d_logLuminance, d_cdf, min_logLum,
range_logLum, numBins);
hipDeviceSynchronize();
/*
* 4) Perform an exclusive scan (prefix sum) on the histogram to get
* the cumulative distribution of luminance values (this should go in the
* incoming d_cdf pointer which already has been allocated for you)
*/
//blelloch_scan_kernel<<<1, numBins, numBins * sizeof(unsigned int)>>>(d_cdf, numBins);hipLaunchKernelGGL((
blelloch_scan_kernel), dim3(1), dim3(numBins), 0, 0, d_cdf, numBins);
hipDeviceSynchronize();
}
| 153f1c1751aa7c177c40bd8ea3c3c3cdb5048df9.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
#include "float.h"
__global__ void blelloch_scan_kernel(unsigned int *d_inout, const size_t num_bins)
{
size_t tid = threadIdx.x;
int i = 0;
for(i = 1; i < num_bins; i <<= 1){
if((tid + 1) % (i * 2) == 0){
d_inout[tid] += d_inout[tid - i];
}
__syncthreads();
}
if(tid == num_bins - 1){
d_inout[tid] = 0;
}
for(i = num_bins / 2; i >= 1; i /= 2){
if((tid + 1) % (i * 2) == 0){
unsigned int tmp = d_inout[tid - i];
d_inout[tid - i] = d_inout[tid];
d_inout[tid] += tmp;
}
__syncthreads();
}
}
__global__ void hillis_scan_kernel(unsigned int *d_inout, const size_t num_bins){
size_t tid = threadIdx.x;
extern __shared__ unsigned int sdata2[];
sdata2[tid] = d_inout[tid];
__syncthreads();
int i = 0;
for(i = 1; i < num_bins; i <<= 1){
if(tid >= i){
atomicAdd(&sdata2[tid], sdata2[tid - i]);
//sdata2[tid] += sdata2[tid - i];
}
__syncthreads();
}
// ---Inclusive to exclusive
if(tid == 0){
d_inout[tid] = 0;
}
else{
d_inout[tid] = sdata2[tid - 1];
}
}
__global__ void create_histogram_kernel(const float *d_in, unsigned int *d_out, const float minval,
const float valrange, const size_t num_bins){
size_t tid = threadIdx.x;
size_t abs_idx = blockIdx.x * blockDim.x + tid;
size_t bin = (d_in[abs_idx] - minval) / valrange * num_bins;
if(bin == num_bins){
// --- Out of range case
bin--;
}
atomicAdd(&d_out[bin], 1);
}
__global__ void shmem_reduce_kernel(const float *d_in, float *d_out, bool is_min_op){
extern __shared__ float sdata1[];
size_t tid = threadIdx.x;
size_t abs_idx = blockIdx.x * blockDim.x + tid;
sdata1[tid] = d_in[abs_idx];
// --- Before going further, we have to make sure that all the shared memory loads have been completed
__syncthreads();
// --- Reduction in shared memory. Only half of the threads contribute to reduction
size_t s = 0;
for(s = (blockDim.x >> 1); s > 0; s >>= 1){
if(tid < s){
if(is_min_op){
sdata1[tid] = fminf(sdata1[tid], sdata1[tid + s]);
}
else{
sdata1[tid] = fmaxf(sdata1[tid], sdata1[tid + s]);
}
}
// --- Make sure all min op at one stage are done
__syncthreads();
}
// --- Only thread 0 writes result for this block back to global mem
if(tid == 0){
d_out[blockIdx.x] = sdata1[0];
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/*
* 1) find the minimum and maximum value in the input logLuminance channel
* store in min_logLum and max_logLum
*/
const size_t numSize = numRows * numCols;
const size_t tnum = (1 << 10);
size_t bnum = numSize >> 10;
float *d_tempbuf;
checkCudaErrors(cudaMalloc((void **)&d_tempbuf, bnum * sizeof(float)));
float *d_min;
float *d_max;
checkCudaErrors(cudaMalloc((void **)&d_min, 1 * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_max, 1 * sizeof(float)));
shmem_reduce_kernel<<<bnum, tnum, tnum * sizeof(float)>>>(d_logLuminance, d_tempbuf, true);
cudaDeviceSynchronize();
shmem_reduce_kernel<<<1, bnum, bnum * sizeof(float)>>>(d_tempbuf, d_min, true);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemset(d_tempbuf, 0, bnum * sizeof(float)));
shmem_reduce_kernel<<<bnum, tnum, tnum * sizeof(float)>>>(d_logLuminance, d_tempbuf, false);
cudaDeviceSynchronize();
shmem_reduce_kernel<<<1, bnum, bnum * sizeof(float)>>>(d_tempbuf, d_max, false);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(&min_logLum, d_min, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_logLum, d_max, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_tempbuf));
checkCudaErrors(cudaFree(d_min));
checkCudaErrors(cudaFree(d_max));
/*
* 2) subtract them to find the range
*/
float range_logLum = max_logLum - min_logLum;
/*
* 3) generate a histogram of all the values in the logLuminance channel using
* the formula: bin = (lum[i] - lumMin) / lumRange * numBins
*/
checkCudaErrors(cudaMemset(d_cdf, 0, sizeof(unsigned int)* numBins));
create_histogram_kernel<<<bnum, tnum>>>(d_logLuminance, d_cdf, min_logLum,
range_logLum, numBins);
cudaDeviceSynchronize();
/*
* 4) Perform an exclusive scan (prefix sum) on the histogram to get
* the cumulative distribution of luminance values (this should go in the
* incoming d_cdf pointer which already has been allocated for you)
*/
//blelloch_scan_kernel<<<1, numBins, numBins * sizeof(unsigned int)>>>(d_cdf, numBins);
blelloch_scan_kernel<<<1, numBins>>>(d_cdf, numBins);
cudaDeviceSynchronize();
}
|
ac05709cba9d9701ee7b845c9f63c65a12a79be4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cuConvertLABToRGBKernel(const float4* src, float4* dst, size_t stride, int width, int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float L = in.x;
float a = in.y;
float b = in.z;
// convert to XYZ
const float T1 = cbrtf(216/24389.0f);
const float fy = (L+16) / 116.0f;
float4 XYZ;
if (L > 8)
XYZ.y = fy*fy*fy;
else
XYZ.y = L / (24389/27.0f);
float fx = a/500.0f + fy;
if (fx > T1)
XYZ.x = fx*fx*fx;
else
XYZ.x = (116*fx-16) / (24389/27.0f);
float fz = fy - b/200.0f;
if (fz > T1)
XYZ.z = fz*fz*fz;
else
XYZ.z = (116*fz-16) / (24389/27.0f);
// Normalize for D65 white point
XYZ.x *= 0.950456f;
XYZ.z *= 1.088754f;
float4 rgb;
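// Standard XYZ -> linear sRGB (D65) matrix; no gamma encoding is applied below,
// so the output stays in linear RGB.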
rgb.x = 3.2404542f*XYZ.x + -1.5371385f*XYZ.y + -0.4985314f*XYZ.z;
rgb.y = -0.9692660f*XYZ.x + 1.8760108f*XYZ.y + 0.0415560f*XYZ.z;
rgb.z = 0.0556434f*XYZ.x + -0.2040259f*XYZ.y + 1.0572252f*XYZ.z;
rgb.w = in.w;
dst[c] = rgb;
}
} | ac05709cba9d9701ee7b845c9f63c65a12a79be4.cu | #include "includes.h"
__global__ void cuConvertLABToRGBKernel(const float4* src, float4* dst, size_t stride, int width, int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
int c = y*stride + x;
if (x<width && y<height)
{
// Read
float4 in = src[c];
float L = in.x;
float a = in.y;
float b = in.z;
// convert to XYZ
const float T1 = cbrtf(216/24389.0f);
const float fy = (L+16) / 116.0f;
float4 XYZ;
if (L > 8)
XYZ.y = fy*fy*fy;
else
XYZ.y = L / (24389/27.0f);
float fx = a/500.0f + fy;
if (fx > T1)
XYZ.x = fx*fx*fx;
else
XYZ.x = (116*fx-16) / (24389/27.0f);
float fz = fy - b/200.0f;
if (fz > T1)
XYZ.z = fz*fz*fz;
else
XYZ.z = (116*fz-16) / (24389/27.0f);
// Normalize for D65 white point
XYZ.x *= 0.950456f;
XYZ.z *= 1.088754f;
float4 rgb;
rgb.x = 3.2404542f*XYZ.x + -1.5371385f*XYZ.y + -0.4985314f*XYZ.z;
rgb.y = -0.9692660f*XYZ.x + 1.8760108f*XYZ.y + 0.0415560f*XYZ.z;
rgb.z = 0.0556434f*XYZ.x + -0.2040259f*XYZ.y + 1.0572252f*XYZ.z;
rgb.w = in.w;
dst[c] = rgb;
}
} |
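// Hypothetical launch configuration (illustration only, not from the original file;
// d_src/d_dst are assumed device pointers, stride is in float4 elements):
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
//   cuConvertLABToRGBKernel<<<grid, block>>>(d_src, d_dst, stride, width, height);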
7573374a09e2a4163fba2e08610422c3f05da393.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2016 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#define CUDA_SAFE_CALL(expr) \
{ \
hipError_t err = (expr); \
if (err != hipSuccess) \
{ \
printf("Cuda error: %s\n", hipGetErrorString(err)); \
assert(false); \
} \
}
using namespace LegionRuntime::Accessor;
class GPUAccumulateCharge {
public:
typedef CircuitNode LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
float *target = &(lhs.charge);
atomicAdd(target,rhs);
}
template<bool EXCLUSIVE>
__device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
float *target = &rhs1;
atomicAdd(target,rhs2);
}
};
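// GPUAccumulateCharge mirrors the Legion reduction-op pattern (apply<> folds an
// RHS into the target field, fold<> combines two RHS values); atomicAdd is used
// because several wires may deposit charge into the same node concurrently.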
// Helper methods
template<typename AT>
__device__ __forceinline__
CircuitNode& get_node(const RegionAccessor<AT, CircuitNode> &pvt,
const RegionAccessor<AT, CircuitNode> &owned,
const RegionAccessor<AT, CircuitNode> &ghost,
PointerLocation loc, ptr_t ptr)
{
switch (loc)
{
case PRIVATE_PTR:
//assert((pvt.first_elmt <= ptr.value) && (ptr.value <= pvt.last_elmt));
return pvt.ref(ptr);
case SHARED_PTR:
//assert((owned.first_elmt <= ptr.value) && (ptr.value <= owned.last_elmt));
return owned.ref(ptr);
case GHOST_PTR:
//assert((ghost.first_elmt <= ptr.value) && (ptr.value <= ghost.last_elmt));
return ghost.ref(ptr);
default:
assert(false);
}
return pvt.ref(ptr);
}
template<typename REDOP, typename AT1, typename AT2>
__device__ __forceinline__
void reduce_local(const RegionAccessor<AT1, CircuitNode> &pvt,
const RegionAccessor<AT2, CircuitNode> &owned,
const RegionAccessor<AT2, CircuitNode> &ghost,
PointerLocation loc, ptr_t ptr, typename REDOP::RHS value)
{
switch (loc)
{
case PRIVATE_PTR:
pvt.template reduce<REDOP>(ptr, value);
break;
case SHARED_PTR:
owned.reduce(ptr, value);
break;
case GHOST_PTR:
ghost.reduce(ptr, value);
break;
default:
assert(false);
}
}
// Actual kernels
template<typename AT>
__global__
void calc_new_currents_kernel(ptr_t first,
int num_wires,
float dt,
int steps,
RegionAccessor<AT,CircuitWire> wires,
RegionAccessor<AT,CircuitNode> pvt,
RegionAccessor<AT,CircuitNode> owned,
RegionAccessor<AT,CircuitNode> ghost)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
ptr_t local_ptr = first + tid;
CircuitWire &wire = wires.ref(local_ptr);
CircuitNode &in_node = get_node(pvt, owned, ghost, wire.in_loc, wire.in_ptr);
CircuitNode &out_node = get_node(pvt, owned, ghost, wire.out_loc, wire.out_ptr);
// Solve RLC model iteratively
float recip_dt = 1.f/dt;
float new_i[WIRE_SEGMENTS];
float new_v[WIRE_SEGMENTS+1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
new_i[i] = wire.current[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
new_v[i+1] = wire.voltage[i];
new_v[0] = in_node.voltage;
new_v[WIRE_SEGMENTS] = out_node.voltage;
float recip_resistance = 1.f/wire.resistance;
float recip_capacitance = 1.f/wire.capacitance;
for (int j = 0; j < steps; j++)
{
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
new_i[i] = ((new_v[i] - new_v[i+1]) -
(wire.inductance*(new_i[i] - wire.current[i]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
new_v[i+1] = wire.voltage[i] + dt*(new_i[i] - new_i[i+1]) * recip_capacitance;
}
}
// Copy everything back
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
wire.current[i] = new_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
wire.voltage[i] = new_v[i+1];
}
}
__host__
void calc_new_currents_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitWire> wires =
extract_accessor<AccessorType::SOA<0>,CircuitWire>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[1]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> owned =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[2]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> ghost =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[3]);
int num_blocks = (p->num_wires+255) >> 8;
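  // one thread per wire; ">> 8" is a ceiling divide by the 256-thread block size used in the launch below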
#ifdef TIME_CUDA_KERNELS
hipEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(hipEventCreate(&ev_start, hipEventDefault));
CUDA_SAFE_CALL(hipEventCreate(&ev_end, hipEventDefault));
CUDA_SAFE_CALL(hipEventRecord(ev_start));
#endif
hipLaunchKernelGGL(( calc_new_currents_kernel), dim3(num_blocks),dim3(256), 0, 0, p->first_wire,
p->num_wires,
p->dt,
p->steps,
wires, pvt, owned, ghost);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(hipEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(hipEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(hipEventDestroy(ev_start));
CUDA_SAFE_CALL(hipEventDestroy(ev_end));
printf("CNC TIME = %f\n", ms);
#endif
#endif
}
template<typename AT1, typename AT2>
__global__
void distribute_charge_kernel(ptr_t first,
int num_wires,
float dt,
RegionAccessor<AT1, CircuitWire> wires,
RegionAccessor<AT1, CircuitNode> pvt,
RegionAccessor<AT2, CircuitNode> owned,
RegionAccessor<AT2, CircuitNode> ghost)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
ptr_t local_ptr = first + tid;
CircuitWire &wire = wires.ref(local_ptr);
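      // pull dt * I of the first segment out of the input node and push dt * I of the last segment into the output node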
reduce_local<GPUAccumulateCharge>(pvt, owned, ghost, wire.in_loc, wire.in_ptr, -dt * wire.current[0]);
reduce_local<GPUAccumulateCharge>(pvt, owned, ghost, wire.out_loc, wire.out_ptr, dt * wire.current[WIRE_SEGMENTS-1]);
}
}
__host__
void distribute_charge_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitWire> wires =
extract_accessor<AccessorType::SOA<0>, CircuitWire>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[1]);
RegionAccessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode> owned =
extract_accessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode>(regions[2]);
RegionAccessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode> ghost =
extract_accessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode>(regions[3]);
int num_blocks = (p->num_wires+255) >> 8;
#ifdef TIME_CUDA_KERNELS
hipEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(hipEventCreate(&ev_start, hipEventDefault));
CUDA_SAFE_CALL(hipEventCreate(&ev_end, hipEventDefault));
CUDA_SAFE_CALL(hipEventRecord(ev_start));
#endif
hipLaunchKernelGGL(( distribute_charge_kernel), dim3(num_blocks),dim3(256), 0, 0, p->first_wire,
p->num_wires,
p->dt,
wires, pvt, owned, ghost);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(hipEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(hipEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(hipEventDestroy(ev_start));
CUDA_SAFE_CALL(hipEventDestroy(ev_end));
printf("DC TIME = %f\n", ms);
#endif
#endif
}
template<typename AT>
__global__
void update_voltages_kernel(ptr_t first,
int num_nodes,
RegionAccessor<AT, CircuitNode> pvt,
RegionAccessor<AT, CircuitNode> owned,
RegionAccessor<AT, PointerLocation> locator)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
ptr_t locator_ptr = first + tid;
      ptr_t local_node = first + tid;
// Figure out if this node is pvt or not
{
int is_pvt = locator.read(locator_ptr) == PRIVATE_PTR;
if (is_pvt)
{
CircuitNode &cur_node = pvt.ref(local_node);
// charge adds in, and then some leaks away
cur_node.voltage += cur_node.charge / cur_node.capacitance;
cur_node.voltage *= (1 - cur_node.leakage);
cur_node.charge = 0;
}
else
{
CircuitNode &cur_node = owned.ref(local_node);
// charge adds in, and then some leaks away
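          // guard a degenerate capacitance so the charge update below cannot divide by zero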
if(cur_node.capacitance < 1e-10)
cur_node.capacitance = 1e-10;
cur_node.voltage += cur_node.charge / cur_node.capacitance;
cur_node.voltage *= (1 - cur_node.leakage);
cur_node.charge = 0;
}
}
}
}
__host__
void update_voltages_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> owned =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[1]);
RegionAccessor<AccessorType::SOA<0>, PointerLocation> locator =
extract_accessor<AccessorType::SOA<0>, PointerLocation>(regions[2]);
int num_blocks = (p->num_nodes+255) >> 8;
#ifdef TIME_CUDA_KERNELS
hipEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(hipEventCreate(&ev_start, hipEventDefault));
CUDA_SAFE_CALL(hipEventCreate(&ev_end, hipEventDefault));
CUDA_SAFE_CALL(hipEventRecord(ev_start));
#endif
hipLaunchKernelGGL(( update_voltages_kernel), dim3(num_blocks),dim3(256), 0, 0, p->first_node,
p->num_nodes,
pvt, owned, locator);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(hipEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(hipEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(hipEventDestroy(ev_start));
CUDA_SAFE_CALL(hipEventDestroy(ev_end));
printf("UV TIME = %f\n", ms);
#endif
#endif
}
| 7573374a09e2a4163fba2e08610422c3f05da393.cu | /* Copyright 2016 Stanford University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "circuit.h"
#include "cuda.h"
#include "cuda_runtime.h"
#define CUDA_SAFE_CALL(expr) \
{ \
cudaError_t err = (expr); \
if (err != cudaSuccess) \
{ \
printf("Cuda error: %s\n", cudaGetErrorString(err)); \
assert(false); \
} \
}
using namespace LegionRuntime::Accessor;
class GPUAccumulateCharge {
public:
typedef CircuitNode LHS;
typedef float RHS;
template<bool EXCLUSIVE>
__device__ __forceinline__
static void apply(LHS &lhs, RHS &rhs)
{
float *target = &(lhs.charge);
atomicAdd(target,rhs);
}
template<bool EXCLUSIVE>
__device__ __forceinline__
static void fold(RHS &rhs1, RHS rhs2)
{
float *target = &rhs1;
atomicAdd(target,rhs2);
}
};
// Helper methods
template<typename AT>
__device__ __forceinline__
CircuitNode& get_node(const RegionAccessor<AT, CircuitNode> &pvt,
const RegionAccessor<AT, CircuitNode> &owned,
const RegionAccessor<AT, CircuitNode> &ghost,
PointerLocation loc, ptr_t ptr)
{
switch (loc)
{
case PRIVATE_PTR:
//assert((pvt.first_elmt <= ptr.value) && (ptr.value <= pvt.last_elmt));
return pvt.ref(ptr);
case SHARED_PTR:
//assert((owned.first_elmt <= ptr.value) && (ptr.value <= owned.last_elmt));
return owned.ref(ptr);
case GHOST_PTR:
//assert((ghost.first_elmt <= ptr.value) && (ptr.value <= ghost.last_elmt));
return ghost.ref(ptr);
default:
assert(false);
}
return pvt.ref(ptr);
}
template<typename REDOP, typename AT1, typename AT2>
__device__ __forceinline__
void reduce_local(const RegionAccessor<AT1, CircuitNode> &pvt,
const RegionAccessor<AT2, CircuitNode> &owned,
const RegionAccessor<AT2, CircuitNode> &ghost,
PointerLocation loc, ptr_t ptr, typename REDOP::RHS value)
{
switch (loc)
{
case PRIVATE_PTR:
pvt.template reduce<REDOP>(ptr, value);
break;
case SHARED_PTR:
owned.reduce(ptr, value);
break;
case GHOST_PTR:
ghost.reduce(ptr, value);
break;
default:
assert(false);
}
}
// Actual kernels
template<typename AT>
__global__
void calc_new_currents_kernel(ptr_t first,
int num_wires,
float dt,
int steps,
RegionAccessor<AT,CircuitWire> wires,
RegionAccessor<AT,CircuitNode> pvt,
RegionAccessor<AT,CircuitNode> owned,
RegionAccessor<AT,CircuitNode> ghost)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
ptr_t local_ptr = first + tid;
CircuitWire &wire = wires.ref(local_ptr);
CircuitNode &in_node = get_node(pvt, owned, ghost, wire.in_loc, wire.in_ptr);
CircuitNode &out_node = get_node(pvt, owned, ghost, wire.out_loc, wire.out_ptr);
// Solve RLC model iteratively
float recip_dt = 1.f/dt;
float new_i[WIRE_SEGMENTS];
float new_v[WIRE_SEGMENTS+1];
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
new_i[i] = wire.current[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
new_v[i+1] = wire.voltage[i];
new_v[0] = in_node.voltage;
new_v[WIRE_SEGMENTS] = out_node.voltage;
float recip_resistance = 1.f/wire.resistance;
float recip_capacitance = 1.f/wire.capacitance;
for (int j = 0; j < steps; j++)
{
// first, figure out the new current from the voltage differential
// and our inductance:
// dV = R*I + L*I' ==> I = (dV - L*I')/R
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
{
new_i[i] = ((new_v[i] - new_v[i+1]) -
(wire.inductance*(new_i[i] - wire.current[i]) * recip_dt)) * recip_resistance;
}
// Now update the inter-node voltages
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
{
new_v[i+1] = wire.voltage[i] + dt*(new_i[i] - new_i[i+1]) * recip_capacitance;
}
}
// Copy everything back
#pragma unroll
for (int i = 0; i < WIRE_SEGMENTS; i++)
wire.current[i] = new_i[i];
#pragma unroll
for (int i = 0; i < (WIRE_SEGMENTS-1); i++)
wire.voltage[i] = new_v[i+1];
}
}
__host__
void calc_new_currents_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitWire> wires =
extract_accessor<AccessorType::SOA<0>,CircuitWire>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[1]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> owned =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[2]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> ghost =
extract_accessor<AccessorType::SOA<0>,CircuitNode>(regions[3]);
int num_blocks = (p->num_wires+255) >> 8;
#ifdef TIME_CUDA_KERNELS
cudaEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(cudaEventCreate(&ev_start, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventCreate(&ev_end, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventRecord(ev_start));
#endif
calc_new_currents_kernel<<<num_blocks,256>>>(p->first_wire,
p->num_wires,
p->dt,
p->steps,
wires, pvt, owned, ghost);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(cudaEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(cudaEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(cudaEventDestroy(ev_start));
CUDA_SAFE_CALL(cudaEventDestroy(ev_end));
printf("CNC TIME = %f\n", ms);
#endif
#endif
}
template<typename AT1, typename AT2>
__global__
void distribute_charge_kernel(ptr_t first,
int num_wires,
float dt,
RegionAccessor<AT1, CircuitWire> wires,
RegionAccessor<AT1, CircuitNode> pvt,
RegionAccessor<AT2, CircuitNode> owned,
RegionAccessor<AT2, CircuitNode> ghost)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_wires)
{
ptr_t local_ptr = first + tid;
CircuitWire &wire = wires.ref(local_ptr);
reduce_local<GPUAccumulateCharge>(pvt, owned, ghost, wire.in_loc, wire.in_ptr, -dt * wire.current[0]);
reduce_local<GPUAccumulateCharge>(pvt, owned, ghost, wire.out_loc, wire.out_ptr, dt * wire.current[WIRE_SEGMENTS-1]);
}
}
__host__
void distribute_charge_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitWire> wires =
extract_accessor<AccessorType::SOA<0>, CircuitWire>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[1]);
RegionAccessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode> owned =
extract_accessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode>(regions[2]);
RegionAccessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode> ghost =
extract_accessor<AccessorType::ReductionFold<GPUAccumulateCharge>, CircuitNode>(regions[3]);
int num_blocks = (p->num_wires+255) >> 8;
#ifdef TIME_CUDA_KERNELS
cudaEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(cudaEventCreate(&ev_start, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventCreate(&ev_end, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventRecord(ev_start));
#endif
distribute_charge_kernel<<<num_blocks,256>>>(p->first_wire,
p->num_wires,
p->dt,
wires, pvt, owned, ghost);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(cudaEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(cudaEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(cudaEventDestroy(ev_start));
CUDA_SAFE_CALL(cudaEventDestroy(ev_end));
printf("DC TIME = %f\n", ms);
#endif
#endif
}
template<typename AT>
__global__
void update_voltages_kernel(ptr_t first,
int num_nodes,
RegionAccessor<AT, CircuitNode> pvt,
RegionAccessor<AT, CircuitNode> owned,
RegionAccessor<AT, PointerLocation> locator)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < num_nodes)
{
ptr_t locator_ptr = first + tid;
      ptr_t local_node = first + tid;
// Figure out if this node is pvt or not
{
int is_pvt = locator.read(locator_ptr) == PRIVATE_PTR;
if (is_pvt)
{
CircuitNode &cur_node = pvt.ref(local_node);
// charge adds in, and then some leaks away
cur_node.voltage += cur_node.charge / cur_node.capacitance;
cur_node.voltage *= (1 - cur_node.leakage);
cur_node.charge = 0;
}
else
{
CircuitNode &cur_node = owned.ref(local_node);
// charge adds in, and then some leaks away
if(cur_node.capacitance < 1e-10)
cur_node.capacitance = 1e-10;
cur_node.voltage += cur_node.charge / cur_node.capacitance;
cur_node.voltage *= (1 - cur_node.leakage);
cur_node.charge = 0;
}
}
}
}
__host__
void update_voltages_gpu(CircuitPiece *p,
const std::vector<PhysicalRegion> ®ions)
{
#ifndef DISABLE_MATH
RegionAccessor<AccessorType::SOA<0>, CircuitNode> pvt =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[0]);
RegionAccessor<AccessorType::SOA<0>, CircuitNode> owned =
extract_accessor<AccessorType::SOA<0>, CircuitNode>(regions[1]);
RegionAccessor<AccessorType::SOA<0>, PointerLocation> locator =
extract_accessor<AccessorType::SOA<0>, PointerLocation>(regions[2]);
int num_blocks = (p->num_nodes+255) >> 8;
#ifdef TIME_CUDA_KERNELS
cudaEvent_t ev_start, ev_end;
CUDA_SAFE_CALL(cudaEventCreate(&ev_start, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventCreate(&ev_end, cudaEventDefault));
CUDA_SAFE_CALL(cudaEventRecord(ev_start));
#endif
update_voltages_kernel<<<num_blocks,256>>>(p->first_node,
p->num_nodes,
pvt, owned, locator);
#ifdef TIME_CUDA_KERNELS
CUDA_SAFE_CALL(cudaEventRecord(ev_end));
#endif
CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifdef TIME_CUDA_KERNELS
float ms;
CUDA_SAFE_CALL(cudaEventElapsedTime(&ms, ev_start, ev_end));
CUDA_SAFE_CALL(cudaEventDestroy(ev_start));
CUDA_SAFE_CALL(cudaEventDestroy(ev_end));
printf("UV TIME = %f\n", ms);
#endif
#endif
}
|
ed2a66fe4d79ef79723ea99f041ee659d163df4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
//saxpy_kernel(int total_elems, int i, int N, float alpha, float* x, float* y, float* result) {
saxpy_kernel( float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
//. TODO MAKE SURE CHECK IS NOT REQUIRED
// if (index < N)
// if(total_elems > (i * N + index))
result[index] = alpha * x[index] + y[index];
}
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
// TODO: implement and use this interface if necessary
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 2; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// hipMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
hipMalloc(&device_x, total_elems * sizeof(float));
hipMalloc(&device_y, total_elems * sizeof(float));
hipMalloc(&device_result, total_elems * sizeof(float));
const long size = (total_elems/partitions);
const long NumBlocks = ((size)/threadsPerBlock);
// NEED TO EDIT THIS
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
double startGPUTime[partitions];
double endGPUTime[partitions];
double timeKernel[partitions];
double endD2HTime[partitions];
double startH2DTime[partitions];
hipStream_t streams[partitions];
hipError_t errCode;
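    // Each pass of this loop handles one partition of the input: stage that chunk of
    // x and y on the device, run saxpy over it, then copy the partial result back,
    // recording timestamps around each stage.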
for (int i=0; i< partitions; i++) {
//
// TODO: copy input arrays to the GPU using hipMemcpy
//
startH2DTime[i] = CycleTimer::currentSeconds();
printf("stream start=%d\n", i);
    printf("size =%ld\n", size);
printf("i =%d\n", i);
//errCode = hipMemcpyAsync(device_x + (i * size) ,xarray + (i * size) , size * sizeof(float) , hipMemcpyHostToDevice, streams[i]);
errCode = hipMemcpy(device_x + (i * size) ,xarray + (i * size) , size * sizeof(float) , hipMemcpyHostToDevice);
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
hipMemcpyAsync(device_y + (i * size) ,yarray + (i * size) , size * sizeof(float) , hipMemcpyHostToDevice, streams[i]);
errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: insert time here to begin timing only the kernel
//
startGPUTime[i] = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
// run saxpy_kernel on the GPU
    printf("size of float =%zu\n", sizeof(float));
    printf("size =%ld\n", size);
    printf("i =%d\n", i);
    printf("increment = %ld\n", i * size );
    printf("xarray = %p\n", (void *)(device_x + (i * size )));
    printf("yarray = %p\n", (void *)(device_y + (i * size )));
    printf("resultarray = %p\n", (void *)(device_result + i * size ));
//saxpy_kernel<<<NumBlocks,threadsPerBlock,0,streams[i]>>>(total_elems, i ,size,alpha, device_x + i * size ,device_y + i * size ,device_result + i * size);
hipLaunchKernelGGL(( saxpy_kernel), dim3(NumBlocks),dim3(threadsPerBlock),0,streams[i], alpha, device_x + i * size ,device_y + i * size ,device_result + i * size);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call hipDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
hipStreamSynchronize(streams[i]);
endGPUTime[i] = CycleTimer::currentSeconds();
printf("stream done=%d\n", i);
timeKernel[i] = endGPUTime[i] - startGPUTime[i];
errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
//
// TODO: copy result from GPU using hipMemcpy
//
hipMemcpyAsync(resultarray+ i * size ,device_result+ i * size ,size * sizeof(float), hipMemcpyDeviceToHost,streams[i]);
errCode = hipPeekAtLastError();
if (errCode != hipSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, hipGetErrorString(errCode));
}
endD2HTime[i] = CycleTimer::currentSeconds();
}
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
for(int j =0 ; j < partitions ; j++){
timeKernelAvg += timeKernel[j];
timeCopyD2HAvg += endD2HTime[j] - endGPUTime[j];
timeCopyH2DAvg += startGPUTime[j] - startH2DTime[j];
}
//
// TODO free memory buffers on the GPU
//
hipFree(device_x);
hipFree(device_y);
hipFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| ed2a66fe4d79ef79723ea99f041ee659d163df4d.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
#include "saxpy.h"
__global__ void
//saxpy_kernel(int total_elems, int i, int N, float alpha, float* x, float* y, float* result) {
saxpy_kernel( float alpha, float* x, float* y, float* result) {
// compute overall index from position of thread in current block,
// and given the block we are in
int index = blockIdx.x * blockDim.x + threadIdx.x;
//. TODO MAKE SURE CHECK IS NOT REQUIRED
// if (index < N)
// if(total_elems > (i * N + index))
result[index] = alpha * x[index] + y[index];
}
static inline
int getBlocks(long working_set_size, int threadsPerBlock) {
// TODO: implement and use this interface if necessary
}
void
getArrays(int size, float **xarray, float **yarray, float **resultarray) {
// TODO: implement and use this interface if necessary
}
void
freeArrays(float *xarray, float *yarray, float *resultarray) {
// TODO: implement and use this interface if necessary
}
void
saxpyCuda(long total_elems, float alpha, float* xarray, float* yarray, float* resultarray, int partitions) {
const int threadsPerBlock = 2; // change this if necessary
float *device_x;
float *device_y;
float *device_result;
//
// TODO: allocate device memory buffers on the GPU using
// cudaMalloc. The started code issues warnings on build because
// these buffers are used in the call to saxpy_kernel below
// without being initialized.
//
cudaMalloc(&device_x, total_elems * sizeof(float));
cudaMalloc(&device_y, total_elems * sizeof(float));
cudaMalloc(&device_result, total_elems * sizeof(float));
const long size = (total_elems/partitions);
const long NumBlocks = ((size)/threadsPerBlock);
// NEED TO EDIT THIS
// start timing after allocation of device memory.
double startTime = CycleTimer::currentSeconds();
double startGPUTime[partitions];
double endGPUTime[partitions];
double timeKernel[partitions];
double endD2HTime[partitions];
double startH2DTime[partitions];
cudaStream_t streams[partitions];
cudaError_t errCode;
for (int i=0; i< partitions; i++) {
//
// TODO: copy input arrays to the GPU using cudaMemcpy
//
startH2DTime[i] = CycleTimer::currentSeconds();
printf("stream start=%d\n", i);
    printf("size =%ld\n", size);
printf("i =%d\n", i);
//errCode = cudaMemcpyAsync(device_x + (i * size) ,xarray + (i * size) , size * sizeof(float) , cudaMemcpyHostToDevice, streams[i]);
errCode = cudaMemcpy(device_x + (i * size) ,xarray + (i * size) , size * sizeof(float) , cudaMemcpyHostToDevice);
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
cudaMemcpyAsync(device_y + (i * size) ,yarray + (i * size) , size * sizeof(float) , cudaMemcpyHostToDevice, streams[i]);
errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: insert time here to begin timing only the kernel
//
startGPUTime[i] = CycleTimer::currentSeconds();
// compute number of blocks and threads per block
// run saxpy_kernel on the GPU
    printf("size of float =%zu\n", sizeof(float));
    printf("size =%ld\n", size);
    printf("i =%d\n", i);
    printf("increment = %ld\n", i * size );
    printf("xarray = %p\n", (void *)(device_x + (i * size )));
    printf("yarray = %p\n", (void *)(device_y + (i * size )));
    printf("resultarray = %p\n", (void *)(device_result + i * size ));
//saxpy_kernel<<<NumBlocks,threadsPerBlock,0,streams[i]>>>(total_elems, i ,size,alpha, device_x + i * size ,device_y + i * size ,device_result + i * size);
saxpy_kernel<<<NumBlocks,threadsPerBlock,0,streams[i]>>>(alpha, device_x + i * size ,device_y + i * size ,device_result + i * size);
//
// TODO: insert timer here to time only the kernel. Since the
// kernel will run asynchronously with the calling CPU thread, you
// need to call cudaDeviceSynchronize() before your timer to
// ensure the kernel running on the GPU has completed. (Otherwise
// you will incorrectly observe that almost no time elapses!)
//
cudaStreamSynchronize(streams[i]);
endGPUTime[i] = CycleTimer::currentSeconds();
printf("stream done=%d\n", i);
timeKernel[i] = endGPUTime[i] - startGPUTime[i];
errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
//
// TODO: copy result from GPU using cudaMemcpy
//
cudaMemcpyAsync(resultarray+ i * size ,device_result+ i * size ,size * sizeof(float), cudaMemcpyDeviceToHost,streams[i]);
errCode = cudaPeekAtLastError();
if (errCode != cudaSuccess) {
fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n", errCode, cudaGetErrorString(errCode));
}
endD2HTime[i] = CycleTimer::currentSeconds();
}
// end timing after result has been copied back into host memory.
// The time elapsed between startTime and endTime is the total
// time to copy data to the GPU, run the kernel, and copy the
// result back to the CPU
double endTime = CycleTimer::currentSeconds();
double overallDuration = endTime - startTime;
totalTimeAvg += overallDuration;
for(int j =0 ; j < partitions ; j++){
timeKernelAvg += timeKernel[j];
timeCopyD2HAvg += endD2HTime[j] - endGPUTime[j];
timeCopyH2DAvg += startGPUTime[j] - startH2DTime[j];
}
//
// TODO free memory buffers on the GPU
//
cudaFree(device_x);
cudaFree(device_y);
cudaFree(device_result);
}
void
printCudaInfo() {
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
a0cd9e2114e87dc46cbcb088acbdebd675c8b75b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <chrono>
#include <random>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/partition.h>
#include <thrust/reduce.h>
#include <thrust/gather.h>
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "sceneStructs.h"
#include "scene.h"
#include "svd3.h"
#include "kdtree.hpp"
#include "utilities.h"
#include "draw.h"
#include "kernel.h"
// Particle Filter Controls
#define PARTICLE_COUNT 1000
#define EFFECTIVE_PARTICLES .7
#define FREE_WEIGHT -1
#define OCCUPIED_WEIGHT 4
#define MAX_NODE_DIST 2.5f
#define MIN_NODE_DIST .5f
#define WALL_CONFIDENCE 30
#define MIN_WALL_COUNT 2
#define CLOSURE_MAP_DIST 6.0f
#define CLOSURE_GRAPH_DIST 20.0f
// Sensor Configuration
#define LIDAR_ANGLE(i) (-135.0f + i * .25f) * PI / 180
#define LIDAR_SIZE 1081
#define LIDAR_RANGE 20.0f
#define COV {0.015, 0.015, .01}
// GPU calculations
#define BLOCK_SIZE 128
// Helper Functions
#define CLAMP(a, lo, hi) (a < lo) ? lo : (a > hi) ? hi : a
#define ROUND_FRAC(a,frac) round((a/frac))*frac;
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Patch * dev_maps = NULL;
// host variables
static MAP_TYPE *occupancyGrid = NULL;
static Particle particles[PARTICLE_COUNT];
static glm::ivec2 map_dim;
static Patch map_params;
static glm::vec3 robotPos;
static std::vector<Cluster> clusters;
// device variable
static MAP_TYPE *dev_occupancyGrid = NULL;
static Particle *dev_particles = NULL;
static int *dev_fit = NULL;
static float *dev_lidar = NULL;
static float *dev_weights = NULL;
static bool *dev_freeCells = NULL;
static bool *dev_wallCells = NULL;
// KD tree variables
#define KD_MAX_SIZE 10000000
static KDTree::Node *dev_kd = NULL;
static KDTree::Node kd[KD_MAX_SIZE];
static int kdSize = 0;
static float *dev_dist = NULL;
static int *dev_pair = NULL;
static float *dev_fitf = NULL;
/**
* Handy-dandy hash function that provides seeds for random number generation.
*/
__host__ __device__ unsigned int utilhash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
// timers
float avg_motion = 0.0f, avg_measurement = 0.0f, avg_map = 0.0f, avg_sample = 0.0f;
void particleFilterInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_maps, scene->maps.size() * sizeof(Patch));
hipMemcpy(dev_maps, scene->maps.data(), scene->maps.size() * sizeof(Patch), hipMemcpyHostToDevice);
map_params = scene->maps[0];
map_dim = glm::ivec2(map_params.scale.x / map_params.resolution.x, map_params.scale.y / map_params.resolution.y);
occupancyGrid = new MAP_TYPE[map_dim.x*map_dim.y];
memset(occupancyGrid, -100, map_dim.x*map_dim.y*sizeof(MAP_TYPE));
for (int i = 0; i < PARTICLE_COUNT; i++) {
particles[i].pos = glm::vec3(0.0f, 0.0f, 0.0f);
particles[i].w = 1.0f;
particles[i].cluster = 0;
}
robotPos = glm::vec3(0.0f);
hipMalloc((void**)&dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE));
hipMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_particles, PARTICLE_COUNT * sizeof(Particle));
hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyHostToDevice);
hipMalloc((void**)&dev_fit, PARTICLE_COUNT * sizeof(int));
hipMalloc((void**)&dev_weights, PARTICLE_COUNT * sizeof(float));
hipMalloc((void**)&dev_lidar, LIDAR_SIZE * sizeof(float));
hipMalloc((void**)&dev_freeCells, map_dim.x * map_dim.y * sizeof(bool));
hipMalloc((void**)&dev_wallCells, map_dim.x * map_dim.y * sizeof(bool));
// initialize default cluster
Cluster group1;
Node n0;
n0.pos = glm::vec2(0.0f);
n0.dist = 0.0f;
group1.id = 0;
group1.nodeIdx = 0;
group1.patchList.push_back(0);
group1.nodes.push_back(n0);
std::vector<unsigned int> empty;
group1.edges.push_back(empty);
clusters.push_back(group1);
checkCUDAError("particleFilterInit");
particleFilterInitPC();
}
void particleFilterFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_maps);
hipFree(dev_occupancyGrid);
hipFree(dev_particles);
hipFree(dev_lidar);
hipFree(dev_fit);
hipFree(dev_weights);
hipFree(dev_freeCells);
hipFree(dev_wallCells);
	delete[] occupancyGrid;
checkCUDAError("particleFilterFree");
particleFilterFreePC();
}
// rotate lidar reading n (beam angle LIDAR_ANGLE(n) plus heading theta) into a 2D point in the robot frame
__device__ __host__ void CleanLidarScan(int n, const float scan, const float theta, glm::vec2 &intersection) {
float rot = LIDAR_ANGLE(n) + theta;
intersection.x = scan * std::cos(rot);
intersection.y = scan * std::sin(rot);
}
//Bresenham's line algorithm for integer grid
__device__ __host__ void traceRay(glm::ivec2 start, glm::ivec2 end, glm::ivec2 map_dim, bool *out){
glm::ivec2 delta = end - start;
// swap to the right octant
bool steep = abs(delta.y) > abs(delta.x);
if (steep) { // check slope
int temp = start.x;
start.x = start.y;
start.y = temp;
temp = end.x;
end.x = end.y;
end.y = temp;
}
if (start.x > end.x){
int temp = start.x;
start.x = end.x;
end.x = temp;
temp = start.y;
start.y = end.y;
end.y = temp;
}
int deltax = end.x - start.x;
int deltay = abs(end.y - start.y);
float error = deltax / 2;
int y = start.y;
int ystep = (end.y > start.y) ? 1 : -1;
// build line
for (int x = start.x; x < end.x; x++){
int idx = 0;
if (steep)
idx = y*map_dim.x + x;
else
idx = x*map_dim.x + y;
if (x < map_dim.x && y < map_dim.y && x >= 0 && y >= 0 && idx < map_dim.x * map_dim.y) { // assume square maps
out[idx] = 1;
}
error -= deltay;
if (error < 0){
y += ystep;
error += deltax;
}
}
}
// sum the value of specified points in a 2d map
__device__ __host__ int mapCorrelation(int N, const MAP_TYPE *map, glm::ivec2 dim, const glm::vec2 *points)
{
int retv = 0;
for (int i = 0; i < N; i++) {
if (points[i].x >= 0 && points[i].x < dim.x && points[i].y >= 0 && points[i].y < dim.y) {
int idx = (int)points[i].x * dim.x + (int)points[i].y;
retv += map[idx];
}
}
return retv;
}
__device__ __host__ int EvaluateParticle(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle &particle, glm::vec3 pos, float *lidar)
{
// get walls relative to robot position, add particle position
glm::vec2 walls[LIDAR_SIZE];
for (int j = 0; j < LIDAR_SIZE; j++) {
CleanLidarScan(j, lidar[j], particle.pos.z, walls[j]);
walls[j].x += particle.pos.x;
walls[j].y += particle.pos.y;
// convert to grid idx
walls[j].x = round(0.5f * map_params.scale.x / map_params.resolution.x + walls[j].x / map_params.resolution.x);
walls[j].y = round(0.5f * map_params.scale.y / map_params.resolution.y + walls[j].y / map_params.resolution.y);
}
// test the map correlation between global map and walls
return mapCorrelation(LIDAR_SIZE, map, map_dim, walls);
}
// kernel wrapper for calling Evaluate Particle
__global__ void kernEvaluateParticles(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle *particles, glm::vec3 pos, float *lidar, int *fit)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
fit[i] = EvaluateParticle(map, map_dim, map_params, particles[i], pos, lidar);
}
}
// simple inplace multiplication kernel
__global__ void kernUpdateWeights(int N, Particle *a, int *b, float c, int min)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
a[i].w = a[i].w * ((float)b[i] - min) * c;
}
}
// simple inplace multiplication kernel
__global__ void kernUpdateWeights(int N, Particle *a, float *b, float c, int min)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
a[i].w = a[i].w * (b[i] - min) * c;
}
}
// update particle cloud weights from measurement
glm::vec3 PFMeasurementUpdate(std::vector<float> lidar) {
glm::vec3 retv(0.0f);
if (GPU_MEASUREMENT) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// create device copy of fit array and lidar
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemset(dev_fit, 0, PARTICLE_COUNT * sizeof(int));
hipDeviceSynchronize();
kernEvaluateParticles << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fit);
hipDeviceSynchronize();
checkCUDAError("particle measurement update error");
thrust::device_vector<int> vFit(dev_fit, dev_fit + PARTICLE_COUNT);
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end());
int rng = *result.second - *result.first;
int best = result.second - vFit.begin();
// rescale all weights
if (rng > 0) {
float f = 1 / (float)(rng);
kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fit, f, *result.first);
hipDeviceSynchronize();
checkCUDAError("particle weight update error");
}
// only use best point for return
		hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyDeviceToHost);
retv = (glm::vec3) particles[best].pos;
}
else {
int best = -128 * LIDAR_SIZE;
int worst = 128 * LIDAR_SIZE;
int iBest = 0;
int fit[PARTICLE_COUNT] = { 0 };
for (int i = 0; i < PARTICLE_COUNT; i++) {
fit[i] = EvaluateParticle(occupancyGrid, map_dim, map_params, particles[i], robotPos, &lidar[0]);
// track correlation maximums
if (fit[i] > best) {
best = fit[i];
iBest = i;
}
if (fit[i] < worst)
worst = fit[i];
}
// rescale all weights
if ((best - worst) > 0) {
float f = 1.0f;
for (int i = 0; i < PARTICLE_COUNT; i++) {
f = (float)(fit[i] - worst) / (float)(best - worst);
particles[i].w *= f;
}
}
retv = (glm::vec3) particles[iBest].pos;
}
return retv;
}
// add noise to a single particle
__device__ __host__ void ParticleAddNoise(Particle &particle, int frame, int idx)
{
float mean[3] = { 0 };
float cov[3] = COV; // covariance: x y theta
thrust::default_random_engine e2 = makeSeededRandomEngine(frame, idx, 0);
thrust::random::normal_distribution<float> distx(mean[0], cov[0]);
thrust::random::normal_distribution<float> disty(mean[1], cov[1]);
thrust::random::normal_distribution<float> distt(mean[2], cov[2]);
glm::vec3 noise(distx(e2), disty(e2), distt(e2));
particle.pos += noise;
}
// kernel wrapper for adding noise to a particle
__global__ void kernAddNoise(Particle *particles, int frame)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
ParticleAddNoise(particles[i], frame, i);
}
}
// perform a motion update on the particle cloud, adding in gaussian noise
void PFMotionUpdate(int frame) {
if (GPU_MOTION) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// sync up host and device arrays for now...
hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyHostToDevice);
kernAddNoise << <blocksPerGrid1d, blockSize1d >> >(dev_particles, frame);
hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
checkCUDAError("particle motion update error");
} else {
for (int i = 0; i < PARTICLE_COUNT; i++)
ParticleAddNoise(particles[i], frame, i);
}
}
__global__ void kernCopyWeights(Particle *particles, float *weights, bool squared)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
weights[i] = (squared) ? particles[i].w * particles[i].w : particles[i].w;
}
}
__global__ void kernWeightedSample(Particle *particles, float *weights, float max, float Neff, int frame)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, i);
thrust::random::uniform_real_distribution<float> dist(0, max);
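		// inverse-CDF sampling: weights[] holds the inclusive prefix sum of the particle
		// weights, so walking it with a uniform draw in [0, max) selects particle idx
		// with probability proportional to its weight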
int idx = 0;
float rnd = dist(gen);
while (idx < PARTICLE_COUNT && rnd > weights[idx]) idx++;
particles[i] = particles[idx];
particles[i].w = 1.0f;
}
}
// check if particles need to be resampled
void PFResample(int frame) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
float r = 0, r2 = 0;
if (GPU_RESAMPLE) {
kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, true);
hipDeviceSynchronize();
thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
r2 = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);
kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, false);
hipDeviceSynchronize();
r = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);
}
else {
for (int i = 0; i < PARTICLE_COUNT; i++) {
r += particles[i].w;
r2 += (particles[i].w) * (particles[i].w);
}
}
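	// effective sample size Neff = (sum w)^2 / (sum w^2); resample only when it drops
	// below the EFFECTIVE_PARTICLES fraction of the cloud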
float Neff = r * r / r2;
if (Neff < EFFECTIVE_PARTICLES*PARTICLE_COUNT) {
if (GPU_RESAMPLE) {
thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
thrust::inclusive_scan(pWeights, pWeights + PARTICLE_COUNT, pWeights);
float max;
hipMemcpy(&max, &dev_weights[PARTICLE_COUNT - 1], sizeof(float), hipMemcpyDeviceToHost);
kernWeightedSample << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, max, Neff, frame);
hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyDeviceToHost);
checkCUDAError("resample error");
}
else {
float weightsum[PARTICLE_COUNT];
weightsum[0] = particles[0].w;
for (int i = 1; i < PARTICLE_COUNT; i++) {
weightsum[i] = weightsum[i - 1] + particles[i].w;
}
thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, 0);
thrust::random::uniform_real_distribution<float> dist(0, weightsum[PARTICLE_COUNT - 1]);
for (int i = 0; i < PARTICLE_COUNT; i++) {
int idx = 0;
float rnd = dist(gen);
while (idx < PARTICLE_COUNT && rnd > weightsum[idx]) idx++;
particles[i] = particles[idx];
particles[i].w = 1.0f;
}
// push particles to GPU to draw
hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyHostToDevice);
}
}
}
__global__ void kernUpdateMap(int N, MAP_TYPE *map, bool *mask, int val)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;
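		// the clamp keeps the cell value safely inside the signed range of MAP_TYPE so repeated updates cannot overflow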
if (mask[i])
map[i] = CLAMP(map[i] + val, -clamp_val, clamp_val);
}
}
__global__ void kernGetWalls(float *lidar, glm::ivec2 center, float theta, bool *freeCells, bool *wallCells, glm::ivec2 map_dim, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
glm::vec2 walls;
//ego centric scan
CleanLidarScan(i, lidar[i], theta, walls);
// this will discard random bad data from sensor that was causing overflow errors
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x = round(walls.x / map_params.resolution.x);
walls.y = round(walls.y / map_params.resolution.y);
// center to robot pos in current map
walls += (glm::vec2) center;
// from here we need to check the wall bounds, determine if it needs to update multiple maps, and create a new patch if necessary.
traceRay(center, walls, map_dim, freeCells);
if (walls.x >= 0 && walls.x < map_dim.x && walls.y >= 0 && walls.y < map_dim.y) {
wallCells[(int)(walls.x * map_dim.x + walls.y)] = true;
}
}
}
}
void PFUpdateMap(std::vector<float> lidar) {
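	// convert the robot's world-frame position into an occupancy-grid cell index (the grid is centered on the world origin)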
glm::ivec2 center_idx(
round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;
if (GPU_MAP) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
hipMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
// Update free/occupied weights
kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, FREE_WEIGHT);
kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_wallCells, OCCUPIED_WEIGHT);
}
else {
// find occupancy grid cells from translated lidar
bool *freeCells = new bool[map_dim.x * map_dim.y];
memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
// find intersections from lidar scan
glm::vec2 walls[LIDAR_SIZE];
for (int i = 0; i < LIDAR_SIZE; i++) {
CleanLidarScan(i, lidar[i], robotPos.z, walls[i]);
walls[i].x = round(walls[i].x / map_params.resolution.x);
walls[i].y = round(walls[i].y / map_params.resolution.y);
walls[i] += center_idx;
if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
traceRay(center_idx, walls[i], map_dim, freeCells);
}
}
// downweight free cells
for (int i = 0; i < map_dim.x; i++) {
for (int j = 0; j < map_dim.y; j++) {
int idx = i*map_dim.x + j;
if (freeCells[idx]) {
occupancyGrid[idx] += FREE_WEIGHT;
occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
}
}
}
// upweight occupied cells
for (int i = 0; i < LIDAR_SIZE; i++) {
if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
int idx = (int)walls[i].x * map_dim.x + (int)walls[i].y;
occupancyGrid[idx] += OCCUPIED_WEIGHT;
occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
}
}
// push grid to GPU to draw
		hipMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), hipMemcpyHostToDevice);
		delete[] freeCells;
}
}
void CreateNode(unsigned int i) {
// create node at current position
Node temp;
temp.pos = (glm::vec2) robotPos;
temp.dist = 0.0f;
float edgeLen = glm::distance(temp.pos, clusters[i].nodes[clusters[i].nodeIdx].pos);
// update distance for all current nodes
for (int j = 0; j < clusters[i].nodes.size(); j++) {
clusters[i].nodes[j].dist += edgeLen;
}
clusters[i].nodes.push_back(temp);
// add edge from new node to last node
std::vector<unsigned int> edge;
edge.push_back(clusters[i].nodeIdx);
clusters[i].edges.push_back(edge);
// add edge from last node to new node
clusters[i].edges[clusters[i].nodeIdx].push_back(clusters[i].nodes.size() - 1);
// update current node
clusters[i].nodeIdx = clusters[i].nodes.size() - 1;
}
// Counts the masked cells whose occupancy weight exceeds WALL_CONFIDENCE (confident wall cells along a traced ray)
__global__ void CheckVisibility(int N, MAP_TYPE *map, bool *mask, unsigned int *retv)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (i < N) {
		// count confident wall cells along the masked ray; atomicAdd avoids a race
		// when several threads hit a wall cell in the same launch
		if (mask[i] && map[i] > WALL_CONFIDENCE)
			atomicAdd(retv, 1);
	}
}
int FindWalls(int clusterID, int nodeID) {
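	// Trace a grid ray from the robot to graph node nodeID and count the confident wall
	// cells along it; callers treat fewer than MIN_WALL_COUNT hits as line of sight.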
unsigned int *dev_retv;
bool *freeCells = new bool[map_dim.x * map_dim.y];
hipMalloc((void**)&dev_retv, sizeof(unsigned int));
hipMemset(dev_retv, 0, sizeof(unsigned int));
memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
glm::ivec2 ai(
round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
glm::ivec2 bi(
round(0.5f * map_dim.x + clusters[clusterID].nodes[nodeID].pos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + clusters[clusterID].nodes[nodeID].pos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
traceRay(ai, bi, map_dim, freeCells);
hipMemcpy(dev_freeCells, freeCells, map_dim.x * map_dim.y*sizeof(bool), hipMemcpyHostToDevice);
const int blockSize1d = 128;
const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d);
CheckVisibility << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, dev_retv);
hipDeviceSynchronize();
int nWalls;
hipMemcpy(&nWalls, dev_retv, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_retv);
	delete[] freeCells;
return nWalls;
}
void UpdateTopology() {
for (int i = 0; i < clusters.size(); i++) {
bool newNode = true;
// check if we need a new node on topology graph for each cluster (this is fast on CPU)
		// we could possibly improve performance here by not recalculating the distance every step, only checking relative to the distance the robot has moved.
for (int j = 0; j < clusters[i].nodes.size(); j++)
newNode &= (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) > MAX_NODE_DIST);
if (newNode) {
CreateNode(i);
			printf("new node from distance. number of graph nodes: %zu\n", clusters[i].nodes.size());
}
// if we don't need a new node for distance, check if we need one from visibility
//if (!newNode) { // run this on GPU to prevent sending the maps back and forth, this operation can be slow even for a small graph
// newNode = true;
// // 1D block for particles
// for (int j = 0; j < clusters[i].nodes.size(); j++) {
// int nWalls = FindWalls(i, j);
// //if (nWalls > 0) printf("found %i walls for node %i\n", nWalls, j);
// newNode &= (nWalls >= MIN_WALL_COUNT);// && (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) > MIN_NODE_DIST);
// }
//
// if (newNode) {
// CreateNode(i);
// printf("new node from visibility. number of graph nodes: %i\n", clusters[i].nodes.size());
// }
//}
}
}
__global__ void AssignParticlesToCluster(int N, Particle *particles) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
// find the closest visible node
}
}
void CheckLoopClosure() {
for (int i = 0; i < clusters.size(); i++) {
for (int j = 0; j < clusters[i].nodes.size(); j++) { // check each node for loop closure conditions
if (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) < CLOSURE_MAP_DIST) {
float edgeLen = glm::distance((glm::vec2) robotPos, clusters[i].nodes[clusters[i].nodeIdx].pos);
if (edgeLen + clusters[i].nodes[j].dist > CLOSURE_GRAPH_DIST) {
//printf("potential loop closure with node %i\n", j);
// find all nodes that could separate clusters
// 1D block for particles
std::vector<int> visibleNodes;
for (int k = 0; k < clusters[i].nodes.size(); k++) {
int nWalls = FindWalls(i, k);
if (nWalls < MIN_WALL_COUNT) {
visibleNodes.push_back(k);
}
}
// create new clusters for each group of visible nodes
for (int k = 0; k < visibleNodes.size(); k++) {
for (int l = 0; l < clusters.size(); l++) {
std::vector<unsigned int> v = clusters[l].edges[visibleNodes[k]]; // only create new cluster if no clusters have an edge between visible and current nodes
bool createCluster = (std::find(v.begin(), v.end(), clusters[i].nodeIdx) != v.end());
if (createCluster) {
// copy cluster and get a new ID for it
Cluster newCluster = clusters[i];
newCluster.id = clusters.size(); // this will be wrong when we start deleting obsolete clusters
// add edges
newCluster.edges[clusters[i].nodeIdx].push_back(visibleNodes[k]);
newCluster.edges[visibleNodes[k]].push_back(clusters[i].nodeIdx);
// update graph distances for all nodes in cluster
//clusters.push_back(newCluster);
}
}
}
// parse all particles into correct cluster
//const int blockSize1d = 128;
//const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
//AssignParticlesToCluster << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles);
// prune unused clusters
					printf("now contains %zu clusters\n", clusters.size());
}
}
}
}
}
void drawMap(uchar4 *pbo) {
drawAll(pbo, PARTICLE_COUNT, hst_scene, dev_image, robotPos, dev_particles, dev_occupancyGrid, dev_maps, clusters);
checkCUDAError("draw screen error");
}
void getPCData(Particle **ptrParticles, MAP_TYPE **ptrMap, KDTree::Node **ptrKD, int *nParticles, int *nKD, glm::vec3 &pos) {
// copy map to host so PCL can draw it
hipMemcpy(occupancyGrid, dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), hipMemcpyDeviceToHost);
*ptrParticles = particles;
*ptrMap = occupancyGrid;
*nParticles = PARTICLE_COUNT;
*ptrKD = kd;
*nKD = kdSize;
pos = robotPos;
}
/**
* Begin ICP code.
*/
__host__ __device__ bool sortFuncX(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.x < p2.x;
}
__host__ __device__ bool sortFuncY(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.y < p2.y;
}
__host__ __device__ bool sortFuncZ(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.z < p2.z;
}
__global__ void transformPoint(int N, glm::vec4 *points, glm::mat4 transform) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
points[index] = glm::vec4(glm::vec3(transform * glm::vec4(glm::vec3(points[index]), 1)), 1);
}
__device__ float getHyperplaneDist(const glm::vec4 *pt1, const glm::vec4 *pt2, int axis, bool *branch)
{
float retv = 0.0f;
if (axis == 0) {
*branch = sortFuncX(*pt1, *pt2);
retv = abs(pt1->x - pt2->x);
}
if (axis == 1) {
*branch = sortFuncY(*pt1, *pt2);
retv = abs(pt1->y - pt2->y);
}
if (axis == 2) {
*branch = sortFuncZ(*pt1, *pt2);
retv = abs(pt1->z - pt2->z);
}
return retv;
}
__global__ void outerProduct(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, glm::mat3 *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = glm::mat3(glm::vec3(vec1[i]) * vec2[i].x,
glm::vec3(vec1[i]) * vec2[i].y,
glm::vec3(vec1[i] * vec2[i].z));
}
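// Iterative nearest-neighbour lookup in the KD-tree: descend to a leaf along the
// splitting planes, then re-descend once from the far side of the best node's parent
// if that hyperplane lies closer than the best distance found so far.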
__global__ void findCorrespondenceKD(int N, glm::vec4 *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = tree[bestIdx].value;
}
__global__ void findCorrespondenceIndexKD(int N, int *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = bestIdx;
}
__global__ void kernGetWallsKD(float *lidar, glm::vec3 robotPos, glm::vec4 *nodeVal, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
//ego centric scan
glm::vec2 walls;
CleanLidarScan(i, lidar[i], robotPos.z, walls);
// this will discard random bad data from sensor that was causing overflow errors
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
			nodeVal[i].x = robotPos.x + walls.x;
			nodeVal[i].y = robotPos.y + walls.y;
nodeVal[i].z = 0.0f;
nodeVal[i].w = OCCUPIED_WEIGHT;
}
}
}
glm::vec3 transformPointICP(glm::vec3 start, std::vector<float> lidar) {
int sizeTarget = LIDAR_SIZE;
glm::vec3 retv = start;
dim3 fullBlocksPerGrid((LIDAR_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE);
// find the closest point in the scene for each point in the target
glm::vec4 *dev_cor, *tar_c, *cor_c, *dev_target;
glm::mat3 *dev_W;
hipMalloc((void**)&dev_cor, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&tar_c, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&cor_c, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&dev_target, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&dev_W, sizeTarget * sizeof(glm::mat3));
hipMemset(dev_W, 0, sizeTarget * sizeof(glm::mat3));
// find intersections from lidar scan
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
//kernGetWalls << <fullBlocksPerGrid, BLOCK_SIZE >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
kernGetWallsKD << <fullBlocksPerGrid, BLOCK_SIZE >> >(dev_lidar, robotPos, dev_target, map_params);
findCorrespondenceKD << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, dev_cor, dev_target, dev_kd);
hipDeviceSynchronize();
// Calculate mean-centered correspondences
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
thrust::device_ptr<glm::vec4> ptr_target(dev_target);
thrust::device_ptr<glm::vec4> ptr_scene(dev_target);
thrust::device_ptr<glm::vec4> ptr_cor(dev_cor);
mu_tar = glm::vec3(thrust::reduce(ptr_target, ptr_target + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_cor = glm::vec3(thrust::reduce(ptr_cor, ptr_cor + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
hipMemcpy(tar_c, dev_target, sizeTarget*sizeof(glm::vec4), hipMemcpyDeviceToDevice);
hipMemcpy(cor_c, dev_cor, sizeTarget*sizeof(glm::vec4), hipMemcpyDeviceToDevice);
checkCUDAError("mean centered calculation failed!");
// move the point cloud with translation
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 center_tar = utilityCore::buildTransformationMatrix(-mu_tar, r, s);
glm::mat4 center_cor = utilityCore::buildTransformationMatrix(-mu_cor, r, s);
transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, tar_c, center_tar);
transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, cor_c, center_cor);
checkCUDAError("mean centered transformation failed!");
hipDeviceSynchronize();
// Calculate W
outerProduct << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, tar_c, cor_c, dev_W);
thrust::device_ptr<glm::mat3> ptr_W(dev_W);
glm::mat3 W = thrust::reduce(ptr_W, ptr_W + sizeTarget, glm::mat3(0));
checkCUDAError("outer product failed!");
hipDeviceSynchronize();
// calculate SVD of W
glm::mat3 U, S, V;
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R = g_U * g_Vt;
glm::vec3 t = glm::vec3(mu_cor) - R*glm::vec3(mu_tar);
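// Kabsch step: with mean-centered targets a_i and correspondences b_i, the cross-covariance
// W = sum_i a_i * b_i^T is decomposed as W = U S V^T, and the least-squares rotation between the
// two clouds is a product of U and V^T (the exact order depends on how W is assembled; the
// reflection case det(R) < 0 is not handled here). The translation t = mu_cor - R * mu_tar then
// maps the target centroid onto the correspondence centroid.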
// update target points
//glm::mat4 transform = glm::translate(glm::mat4(), t) * glm::mat4(R);
//transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, dev_target, transform);
// make a massive assumption that the SVD will already result in a 2d rotation around the z-axis
//glm::vec4 newPoint(start.x, start.y, 0.0f, 0.0f);
//newPoint = transform*newPoint;
float theta = asin(R[0][1]);
retv.x += t.x;
retv.y += t.y;
retv.z += theta;
hipFree(dev_cor);
hipFree(tar_c);
hipFree(cor_c);
hipFree(dev_W);
hipFree(dev_target);
return retv;
}
void particleFilterInitPC() {
// KD tree data
hipMalloc((void**)&dev_dist, LIDAR_SIZE * sizeof(float));
checkCUDAError("hipMalloc dev_dist failed!");
hipMalloc((void**)&dev_pair, LIDAR_SIZE * sizeof(int));
checkCUDAError("hipMalloc dev_pair failed!");
hipMalloc((void**)&dev_kd, KD_MAX_SIZE * sizeof(KDTree::Node));
checkCUDAError("hipMalloc dev_kd failed!");
hipMalloc((void**)&dev_fitf, PARTICLE_COUNT * sizeof(float));
checkCUDAError("hipMalloc dev_fitf failed!");
hipDeviceSynchronize();
checkCUDAError("particleFilterInitPC");
}
void particleFilterFreePC() {
hipFree(dev_dist);
hipFree(dev_pair);
hipFree(dev_kd);
hipFree(dev_fitf);
checkCUDAError("particleFilterFreePC");
}
__device__ float weight[LIDAR_SIZE];
__device__ float particlePos[3];
__device__ float atmWeight[PARTICLE_COUNT*LIDAR_SIZE];
__global__ void kernFindWallCorrespondance(float *lidar, KDTree::Node *kdTree, int nParticle) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
glm::vec2 walls;
glm::vec4 pt;
CleanLidarScan(i, lidar[i], particlePos[2], walls);
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x += particlePos[0];
walls.y += particlePos[1];
pt.x = walls.x;
pt.y = walls.y;
pt.z = 0.0f;
pt.w = 0.0f;
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(kdTree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = kdTree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = kdTree[kdTree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
atmWeight[LIDAR_SIZE*nParticle + i] = kdTree[bestIdx].value.w;
//atomicAdd(&atmWeight[nParticle], kdTree[bestIdx].value.w);
}
else {
atmWeight[LIDAR_SIZE*nParticle + i] = 0;
}
}
}
#define DYNAMIC_KERN 0
__device__ float EvaluateParticleKD(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle &particle, glm::vec3 pos, float *lidar, KDTree::Node *kdTree, int kdSize, int nParticle)
{
// get walls relative to robot position, add particle position
glm::vec2 walls;
glm::vec4 pt;
int nValid = 0;
glm::vec3 mu_tar(0.0f);
glm::vec3 mu_cor(0.0f);
float retv = 0.0f;
#if (DYNAMIC_KERN == 1)
// try launching dynamic kernel
// 1D block for LIDAR
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
particlePos[0] = particle.pos.x;
particlePos[1] = particle.pos.y;
particlePos[2] = particle.pos.z;
atmWeight[nParticle] = 0.0f;
kernFindWallCorrespondance << <blocksPerGrid1d, blockSize1d >> >(lidar, kdTree, nParticle);
__syncthreads();
#endif
// get walls and find correspondence
for (int j = 0; j < LIDAR_SIZE; j++) {
#if (DYNAMIC_KERN == 0)
CleanLidarScan(j, lidar[j], particle.pos.z, walls);
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x += particle.pos.x;
walls.y += particle.pos.y;
pt.x = walls.x;
pt.y = walls.y;
pt.z = 0.0f;
pt.w = 0.0f;
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(kdTree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = kdTree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = kdTree[kdTree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
mu_tar += (glm::vec3) pt;
mu_cor += (glm::vec3) kdTree[bestIdx].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y) * 2;
//if (glm::distance((glm::vec3) pt, (glm::vec3) kdTree[bestIdx].value) < minDist) {
retv += kdTree[bestIdx].value.w;
//}
}
#else
retv += atmWeight[LIDAR_SIZE*nParticle + j];
#endif
}
//printf("matches found: %i %.4f\n", nValid, retv);
//mu_tar /= nValid;
//mu_cor /= nValid;
return retv;
}
// kernel wrapper for calling Evaluate Particle
__global__ void kernEvaluateParticlesKD(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle *particles, glm::vec3 pos, float *lidar, float *fit, KDTree::Node *kdTree, int kdSize)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
fit[i] = EvaluateParticleKD(map, map_dim, map_params, particles[i], pos, lidar, kdTree, kdSize, i);
}
}
// update particle cloud weights from measurement
glm::vec3 PFMeasurementUpdateKD(std::vector<float> lidar) {
glm::vec3 retv(0.0f);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// create device copy of fit array and lidar
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemset(dev_fitf, 0, PARTICLE_COUNT * sizeof(float));
hipDeviceSynchronize();
kernEvaluateParticlesKD << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fitf, dev_kd, kdSize);
hipDeviceSynchronize();
checkCUDAError("particle measurement kd tree update error");
thrust::device_vector<float> vFit(dev_fitf, dev_fitf + PARTICLE_COUNT);
thrust::pair<thrust::device_vector<float>::iterator, thrust::device_vector<float>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end());
float rng = *result.second - *result.first;
int best = result.second - vFit.begin();
// rescale all weights
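// Min-max normalization: each particle weight is multiplied by (fit_i - min) / (max - min),
// applied by kernUpdateWeights with f = 1 / (max - min). If every particle scored identically
// (rng == 0) the weights are left untouched.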
if (rng > 0.0f) {
float f = 1 / rng;
kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fitf, f, *result.first);
hipDeviceSynchronize();
checkCUDAError("particle weight kdtree update error");
}
// only use best point for return
hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), hipMemcpyDeviceToHost);
//retv = (glm::vec3) particles[best].pos;
// run ICP on final point only
retv = transformPointICP((glm::vec3) particles[best].pos, lidar);
return retv;
}
__global__ void kernUpdateMapKD(int N, KDTree::Node* tree, glm::vec4 *target, int *indexList, int val, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
long clamp_val = (1 << (sizeof(MAP_TYPE) * 8 - 1)) - 15;
glm::vec3 a = (glm::vec3) target[i];
glm::vec3 b = (glm::vec3) tree[indexList[i]].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y);
if (glm::distance(a, b) < minDist) {
tree[indexList[i]].value.w = CLAMP(tree[indexList[i]].value.w + val, -clamp_val, clamp_val);
}
}
}
__global__ void kernTestCorrespondance(int N, KDTree::Node* tree, glm::vec4 *target, int *indexList, bool *diff, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
glm::vec3 a = (glm::vec3) target[i];
glm::vec3 b = (glm::vec3) tree[indexList[i]].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y) / 2.0f;
diff[i] = (glm::distance(a, b) > minDist);
}
}
__global__ void kernGeneratePosArray(glm::vec4 *out, glm::vec3 pos, glm::ivec2 dim, Patch params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < dim.x*dim.y) {
int x = i / dim.x;
int y = i % dim.x;
out[i].x = x * params.resolution.x - params.scale.x / 2.0f + pos.x;
out[i].y = y * params.resolution.y - params.scale.y / 2.0f + pos.y;
out[i].x = ROUND_FRAC(out[i].x, params.resolution.x);
out[i].y = ROUND_FRAC(out[i].y, params.resolution.y);
out[i].z = 0.0f;
}
}
struct is_true
{
__host__ __device__
bool operator()(const bool x)
{
return x == true;
}
};
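// Point-cloud (kd-tree) map update, PFUpdateMapKD:
// 1. Raycast the current scan into per-cell free/wall masks (kernGetWalls).
// 2. Copy the masks back to the host and build wall/free point lists in world coordinates,
//    snapped to the map resolution (the commented-out thrust::copy_if path in PFUpdateMapKD_SLOW
//    sketches a device-side compaction of the same step).
// 3. Look up the nearest existing map point for every list entry (findCorrespondenceIndexKD) and
//    add FREE_WEIGHT / OCCUPIED_WEIGHT to the matched node's confidence (kernUpdateMapKD).
// 4. Wall points with no sufficiently close match (kernTestCorrespondance) are inserted as new
//    kd-tree nodes with a low initial confidence.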
void PFUpdateMapKD(std::vector<float> lidar) {
// get local free and occupied grid cells here. Use these values to update pointcloud
glm::ivec2 center_idx(
round(0.5f * map_dim.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + map_params.resolution.y / 2)
);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
hipMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
bool *wallCells = new bool[map_dim.x * map_dim.y];
bool *freeCells = new bool[map_dim.x * map_dim.y];
std::vector<glm::vec4> wallPC;
std::vector<glm::vec4> freePC;
hipMemcpy(wallCells, dev_wallCells, map_dim.x * map_dim.y * sizeof(bool), hipMemcpyDeviceToHost);
hipMemcpy(freeCells, dev_freeCells, map_dim.x * map_dim.y * sizeof(bool), hipMemcpyDeviceToHost);
// Create Pointclouds here
// parallelize through compactions and summation
for (int x = 0; x < map_dim.x; x++) {
for (int y = 0; y < map_dim.y; y++) {
int idx = (x * map_dim.x) + y;
if (wallCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
wallPC.push_back(point);
}
if (freeCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
freePC.push_back(point);
}
}
}
if (kdSize > 0) {
// downweight existing wall cells if in freeCells
const dim3 blocksPerGridFree((freePC.size() + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridWall((wallPC.size() + blockSize1d - 1) / blockSize1d);
glm::vec4 *dev_walls, *dev_free;
int *dev_walls_c, *dev_free_c;
hipMalloc((void**)&dev_walls, wallPC.size()*sizeof(glm::vec4));
hipMalloc((void**)&dev_walls_c, wallPC.size()*sizeof(int));
hipMalloc((void**)&dev_free, freePC.size()*sizeof(glm::vec4));
hipMalloc((void**)&dev_free_c, freePC.size()*sizeof(int));
hipMemcpy(dev_free, &freePC[0], freePC.size()*sizeof(glm::vec4), hipMemcpyHostToDevice);
hipMemcpy(dev_walls, &wallPC[0], wallPC.size()*sizeof(glm::vec4), hipMemcpyHostToDevice);
findCorrespondenceIndexKD << <blocksPerGridFree, BLOCK_SIZE >> >(freePC.size(), dev_free_c, dev_free, dev_kd);
findCorrespondenceIndexKD << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_walls_c, dev_walls, dev_kd);
hipDeviceSynchronize();
checkCUDAError("map update - correspondance failure");
bool *wallsCreate = new bool[wallPC.size()];
bool *dev_wallsCreate = NULL;
hipMalloc((void**)&dev_wallsCreate, wallPC.size()*sizeof(bool));
//hipMemcpy(free_c, &freePC[0], freePC.size()*sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(walls_c, &wallPC[0], wallPC.size()*sizeof(int), hipMemcpyHostToDevice);
// downweight free cells
kernUpdateMapKD << <blocksPerGridFree, BLOCK_SIZE >> >(freePC.size(), dev_kd, dev_free, dev_free_c, FREE_WEIGHT, map_params);
// upweight existing wall cells
kernUpdateMapKD << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_kd, dev_walls, dev_walls_c, OCCUPIED_WEIGHT, map_params);
hipDeviceSynchronize();
// insert any new wall cells
kernTestCorrespondance << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_kd, dev_walls, dev_walls_c, dev_wallsCreate, map_params);
hipMemcpy(wallsCreate, dev_wallsCreate, wallPC.size()*sizeof(bool), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
int nInsert = 0;
for (int i = 0; i < wallPC.size(); i++) {
if (wallsCreate[i]) nInsert++;
}
// we don't want to rebuild the kd-tree every time; we just want to maintain copies on both the host and device...
if (nInsert > 0) {
hipMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), hipMemcpyDeviceToHost); // make sure to copy new weight values
for (int i = 0; i < wallPC.size(); i++) {
if (wallsCreate[i]) {
wallPC[i].w = -100;
KDTree::InsertNode(wallPC[i], kd, kdSize++);
}
}
//printf("new pointcloud size: %i\n", kdSize);
hipMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), hipMemcpyHostToDevice);
}
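// InsertNode appends leaves without rebalancing, so the tree degrades as the map grows;
// particleFilter() periodically copies it back to the host and calls KDTree::Balance to restore
// it (see the frame % 100 == 5 block there).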
checkCUDAError("map update - insert failure");
hipFree(dev_walls);
hipFree(dev_walls_c);
hipFree(dev_free);
hipFree(dev_free_c);
hipFree(dev_wallsCreate);
delete[] wallsCreate;
} else { // create a new kd tree from first scan
KDTree::Create(wallPC, kd);
hipMemcpy(dev_kd, kd, wallPC.size()*sizeof(KDTree::Node), hipMemcpyHostToDevice);
kdSize += wallPC.size();
}
delete[] wallCells;
delete[] freeCells;
}
void PFUpdateMapKD_SLOW(std::vector<float> lidar) {
// get local free and occupied grid cells here. Use these values to update pointcloud
glm::ivec2 center_idx(
round(0.5f * map_dim.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + map_params.resolution.y / 2)
);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
hipMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
//// create index list
//glm::vec4 *dev_pos, *dev_walls_pc, *dev_free_pc;
//hipMalloc((void**)&dev_pos, map_dim.x*map_dim.y*sizeof(glm::vec4));
//hipMalloc((void**)&dev_walls_pc, map_dim.x*map_dim.y*sizeof(glm::vec4));
//hipMalloc((void**)&dev_free_pc, map_dim.x*map_dim.y*sizeof(glm::vec4));
//const dim3 blocksPerGridMap((map_dim.x*map_dim.y + blockSize1d - 1) / blockSize1d);
//kernGeneratePosArray << < blocksPerGridMap, blockSize1d >> >(dev_pos, robotPos, map_dim, map_params);
//thrust::device_ptr<glm::vec4> ptr_pos = thrust::device_pointer_cast(dev_pos);
//int nWalls = thrust::copy_if(thrust::device, ptr_pos, ptr_pos + map_dim.x*map_dim.y, dev_wallCells, dev_walls_pc, is_true()) - dev_walls_pc;
//int nFree = thrust::copy_if(thrust::device, ptr_pos, ptr_pos + map_dim.x*map_dim.y, dev_freeCells, dev_free_pc, is_true()) - dev_free_pc;
bool *wallCells = new bool[map_dim.x * map_dim.y];
bool *freeCells = new bool[map_dim.x * map_dim.y];
std::vector<glm::vec4> wallPC;
std::vector<glm::vec4> freePC;
hipMemcpy(wallCells, dev_wallCells, map_dim.x * map_dim.y * sizeof(bool), hipMemcpyDeviceToHost);
hipMemcpy(freeCells, dev_freeCells, map_dim.x * map_dim.y * sizeof(bool), hipMemcpyDeviceToHost);
// Create Pointclouds here
// parallelize through compactions and summation
for (int x = 0; x < map_dim.x; x++) {
for (int y = 0; y < map_dim.y; y++) {
int idx = (x * map_dim.x) + y;
if (wallCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
wallPC.push_back(point);
}
if (freeCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
freePC.push_back(point);
}
}
}
int nFree = freePC.size();
int nWalls = wallPC.size();
if (kdSize > 0) {
// downweight existing wall cells if in freeCells
const dim3 blocksPerGridFree((nFree + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridWall((nWalls + blockSize1d - 1) / blockSize1d);
glm::vec4 *dev_walls_pc, *dev_free_pc;
hipMalloc((void**)&dev_walls_pc, nWalls*sizeof(glm::vec4));
hipMalloc((void**)&dev_free_pc, nFree*sizeof(glm::vec4));
int *dev_walls_c, *dev_free_c;
hipMalloc((void**)&dev_walls_c, nWalls*sizeof(int));
hipMalloc((void**)&dev_free_c, nFree*sizeof(int));
findCorrespondenceIndexKD << <blocksPerGridFree, BLOCK_SIZE >> >(nFree, dev_free_c, dev_free_pc, dev_kd);
findCorrespondenceIndexKD << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_walls_c, dev_walls_pc, dev_kd);
hipDeviceSynchronize();
checkCUDAError("map update - correspondance failure");
bool *wallsCreate = new bool[nWalls];
bool *dev_wallsCreate = NULL;
hipMalloc((void**)&dev_wallsCreate, nWalls*sizeof(bool));
// downweight free cells
kernUpdateMapKD << <blocksPerGridFree, BLOCK_SIZE >> >(nFree, dev_kd, dev_free_pc, dev_free_c, FREE_WEIGHT, map_params);
// upweight existing wall cells
kernUpdateMapKD << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_kd, dev_walls_pc, dev_walls_c, OCCUPIED_WEIGHT, map_params);
hipDeviceSynchronize();
// insert any new wall cells
kernTestCorrespondance << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_kd, dev_walls_pc, dev_walls_c, dev_wallsCreate, map_params);
hipMemcpy(wallsCreate, dev_wallsCreate, nWalls*sizeof(bool), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
int nInsert = thrust::count(thrust::device, dev_wallsCreate, dev_wallsCreate + nWalls, true);
// we don't want to rebuild the kd-tree every time; we just want to maintain copies on both the host and device...
if (nInsert > 0) {
glm::vec4 *wallPC = new glm::vec4[nWalls];
hipMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), hipMemcpyDeviceToHost); // make sure to copy new weight values
hipMemcpy(wallPC, dev_walls_pc, nWalls*sizeof(glm::vec4), hipMemcpyDeviceToHost);
for (int i = 0; i < nWalls; i++) {
if (wallsCreate[i]) {
wallPC[i].w = -100;
KDTree::InsertNode(wallPC[i], kd, kdSize++);
}
}
//printf("new pointcloud size: %i\n", kdSize);
hipMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), hipMemcpyHostToDevice);
delete[] wallPC;
}
checkCUDAError("map update - insert failure");
hipFree(dev_walls_c);
hipFree(dev_free_c);
hipFree(dev_wallsCreate);
hipFree(dev_walls_pc);
hipFree(dev_free_pc);
delete[] wallsCreate;
} else { // create a new kd tree from first scan
//std::vector<glm::vec4> wallPC;
//wallPC.reserve(nWalls);
//wallPC.push_back(glm::vec4(0.0f));
//hipMemcpy(&wallPC[0], dev_walls_pc, nWalls*sizeof(glm::vec4), hipMemcpyDeviceToHost);
KDTree::Create(wallPC, kd);
hipMemcpy(dev_kd, kd, nWalls*sizeof(KDTree::Node), hipMemcpyHostToDevice);
kdSize += wallPC.size();
}
//hipFree(dev_pos);
//hipFree(dev_walls_pc);
//hipFree(dev_free_pc);
delete[] wallCells;
delete[] freeCells;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void particleFilter(uchar4 *pbo, int frame, Lidar *lidar) {
if (frame % 1000 == 0)
printf("PC size: %i\n", kdSize);
if (frame % 100 == 5) {
hipMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), hipMemcpyDeviceToHost);
KDTree::Balance(kd, kdSize);
hipMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), hipMemcpyHostToDevice);
}
//special case for first scan
if (kdSize == 0) {
robotPos = glm::vec3(0.0f, 0.0f, 0.0f);
PFUpdateMapKD(lidar->scans[frame]);
}
else {
// timing metrics
if (frame % 100 == 0) {
avg_motion = 0.0f;
avg_measurement = 0.0f;
avg_map = 0.0f;
avg_sample = 0.0f;
}
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
PFMotionUpdate(frame);
end = std::chrono::system_clock::now();
avg_motion += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
robotPos = PFMeasurementUpdateKD(lidar->scans[frame]);
end = std::chrono::system_clock::now();
avg_measurement += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
PFUpdateMapKD(lidar->scans[frame]);
end = std::chrono::system_clock::now();
avg_map += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
PFResample(frame);
end = std::chrono::system_clock::now();
avg_sample += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
//UpdateTopology();
//CheckLoopClosure();
// print timing metrics
if (frame % 100 == 0) {
cout << "Frame " << frame << ":" << endl;
printf(" motion: %3.2f\n", avg_motion / 100.0f);
printf(" measurement: %3.2f\n", avg_measurement / 100.0f);
printf(" map: %3.2f\n", avg_map / 100.0f);
printf(" resample: %3.2f\n", avg_sample / 100.0f);
}
}
} | a0cd9e2114e87dc46cbcb088acbdebd675c8b75b.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <chrono>
#include <random>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <thrust/partition.h>
#include <thrust/reduce.h>
#include <thrust/gather.h>
#include <glm/glm.hpp>
#include <glm/gtx/norm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "sceneStructs.h"
#include "scene.h"
#include "svd3.h"
#include "kdtree.hpp"
#include "utilities.h"
#include "draw.h"
#include "kernel.h"
// Particle Filter Controls
#define PARTICLE_COUNT 1000
#define EFFECTIVE_PARTICLES .7
#define FREE_WEIGHT -1
#define OCCUPIED_WEIGHT 4
#define MAX_NODE_DIST 2.5f
#define MIN_NODE_DIST .5f
#define WALL_CONFIDENCE 30
#define MIN_WALL_COUNT 2
#define CLOSURE_MAP_DIST 6.0f
#define CLOSURE_GRAPH_DIST 20.0f
// Sensor Configuration
#define LIDAR_ANGLE(i) (-135.0f + i * .25f) * PI / 180
#define LIDAR_SIZE 1081
#define LIDAR_RANGE 20.0f
#define COV {0.015, 0.015, .01}
// GPU calculations
#define BLOCK_SIZE 128
// Helper Functions
#define CLAMP(a, lo, hi) (a < lo) ? lo : (a > hi) ? hi : a
#define ROUND_FRAC(a,frac) round((a/frac))*frac;
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Patch * dev_maps = NULL;
// host variables
static MAP_TYPE *occupancyGrid = NULL;
static Particle particles[PARTICLE_COUNT];
static glm::ivec2 map_dim;
static Patch map_params;
static glm::vec3 robotPos;
static std::vector<Cluster> clusters;
// device variable
static MAP_TYPE *dev_occupancyGrid = NULL;
static Particle *dev_particles = NULL;
static int *dev_fit = NULL;
static float *dev_lidar = NULL;
static float *dev_weights = NULL;
static bool *dev_freeCells = NULL;
static bool *dev_wallCells = NULL;
// KD tree variables
#define KD_MAX_SIZE 10000000
static KDTree::Node *dev_kd = NULL;
static KDTree::Node kd[KD_MAX_SIZE];
static int kdSize = 0;
static float *dev_dist = NULL;
static int *dev_pair = NULL;
static float *dev_fitf = NULL;
/**
* Handy-dandy hash function that provides seeds for random number generation.
*/
__host__ __device__ unsigned int utilhash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
// timers
float avg_motion = 0.0f, avg_measurement = 0.0f, avg_map = 0.0f, avg_sample = 0.0f;
void particleFilterInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_maps, scene->maps.size() * sizeof(Patch));
cudaMemcpy(dev_maps, scene->maps.data(), scene->maps.size() * sizeof(Patch), cudaMemcpyHostToDevice);
map_params = scene->maps[0];
map_dim = glm::ivec2(map_params.scale.x / map_params.resolution.x, map_params.scale.y / map_params.resolution.y);
occupancyGrid = new MAP_TYPE[map_dim.x*map_dim.y];
memset(occupancyGrid, -100, map_dim.x*map_dim.y*sizeof(MAP_TYPE));
for (int i = 0; i < PARTICLE_COUNT; i++) {
particles[i].pos = glm::vec3(0.0f, 0.0f, 0.0f);
particles[i].w = 1.0f;
particles[i].cluster = 0;
}
robotPos = glm::vec3(0.0f);
cudaMalloc((void**)&dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE));
cudaMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_particles, PARTICLE_COUNT * sizeof(Particle));
cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_fit, PARTICLE_COUNT * sizeof(int));
cudaMalloc((void**)&dev_weights, PARTICLE_COUNT * sizeof(float));
cudaMalloc((void**)&dev_lidar, LIDAR_SIZE * sizeof(float));
cudaMalloc((void**)&dev_freeCells, map_dim.x * map_dim.y * sizeof(bool));
cudaMalloc((void**)&dev_wallCells, map_dim.x * map_dim.y * sizeof(bool));
// initialize default cluster
Cluster group1;
Node n0;
n0.pos = glm::vec2(0.0f);
n0.dist = 0.0f;
group1.id = 0;
group1.nodeIdx = 0;
group1.patchList.push_back(0);
group1.nodes.push_back(n0);
std::vector<unsigned int> empty;
group1.edges.push_back(empty);
clusters.push_back(group1);
checkCUDAError("particleFilterInit");
particleFilterInitPC();
}
void particleFilterFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_maps);
cudaFree(dev_occupancyGrid);
cudaFree(dev_particles);
cudaFree(dev_lidar);
cudaFree(dev_fit);
cudaFree(dev_weights);
cudaFree(dev_freeCells);
cudaFree(dev_wallCells);
delete[] occupancyGrid;
checkCUDAError("particleFilterFree");
particleFilterFreePC();
}
// rotates generates 2d point for lidar reading
__device__ __host__ void CleanLidarScan(int n, const float scan, const float theta, glm::vec2 &intersection) {
float rot = LIDAR_ANGLE(n) + theta;
intersection.x = scan * std::cos(rot);
intersection.y = scan * std::sin(rot);
}
//Bresenham's line algorithm for integer grid
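// The ray is first normalized into the low-slope, left-to-right octant by swapping x/y when
// |dy| > |dx| and swapping endpoints when start.x > end.x. The integer error term then
// accumulates deltay per column and steps y by ystep whenever it crosses half a cell, marking
// every traversed cell in `out` as free.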
__device__ __host__ void traceRay(glm::ivec2 start, glm::ivec2 end, glm::ivec2 map_dim, bool *out){
glm::ivec2 delta = end - start;
// swap to the right octant
bool steep = abs(delta.y) > abs(delta.x);
if (steep) { // check slope
int temp = start.x;
start.x = start.y;
start.y = temp;
temp = end.x;
end.x = end.y;
end.y = temp;
}
if (start.x > end.x){
int temp = start.x;
start.x = end.x;
end.x = temp;
temp = start.y;
start.y = end.y;
end.y = temp;
}
int deltax = end.x - start.x;
int deltay = abs(end.y - start.y);
float error = deltax / 2;
int y = start.y;
int ystep = (end.y > start.y) ? 1 : -1;
// build line
for (int x = start.x; x < end.x; x++){
int idx = 0;
if (steep)
idx = y*map_dim.x + x;
else
idx = x*map_dim.x + y;
if (x < map_dim.x && y < map_dim.y && x >= 0 && y >= 0 && idx < map_dim.x * map_dim.y) { // assume square maps
out[idx] = 1;
}
error -= deltay;
if (error < 0){
y += ystep;
error += deltax;
}
}
}
// sum the value of specified points in a 2d map
__device__ __host__ int mapCorrelation(int N, const MAP_TYPE *map, glm::ivec2 dim, const glm::vec2 *points)
{
int retv = 0;
for (int i = 0; i < N; i++) {
if (points[i].x >= 0 && points[i].x < dim.x && points[i].y >= 0 && points[i].y < dim.y) {
int idx = (int)points[i].x * dim.x + (int)points[i].y;
retv += map[idx];
}
}
return retv;
}
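// Score a single particle: the lidar scan is rotated by the particle's heading, translated by its
// position, converted to grid indices, and the occupancy values at those cells are summed
// (mapCorrelation). A higher sum means the predicted walls line up with cells the map already
// believes are occupied.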
__device__ __host__ int EvaluateParticle(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle &particle, glm::vec3 pos, float *lidar)
{
// get walls relative to robot position, add particle position
glm::vec2 walls[LIDAR_SIZE];
for (int j = 0; j < LIDAR_SIZE; j++) {
CleanLidarScan(j, lidar[j], particle.pos.z, walls[j]);
walls[j].x += particle.pos.x;
walls[j].y += particle.pos.y;
// convert to grid idx
walls[j].x = round(0.5f * map_params.scale.x / map_params.resolution.x + walls[j].x / map_params.resolution.x);
walls[j].y = round(0.5f * map_params.scale.y / map_params.resolution.y + walls[j].y / map_params.resolution.y);
}
// test the map correlation between global map and walls
return mapCorrelation(LIDAR_SIZE, map, map_dim, walls);
}
// kernel wrapper for calling Evaluate Particle
__global__ void kernEvaluateParticles(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle *particles, glm::vec3 pos, float *lidar, int *fit)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
fit[i] = EvaluateParticle(map, map_dim, map_params, particles[i], pos, lidar);
}
}
// simple inplace multiplication kernel
__global__ void kernUpdateWeights(int N, Particle *a, int *b, float c, int min)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
a[i].w = a[i].w * ((float)b[i] - min) * c;
}
}
// simple inplace multiplication kernel
__global__ void kernUpdateWeights(int N, Particle *a, float *b, float c, int min)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
a[i].w = a[i].w * (b[i] - min) * c;
}
}
// update particle cloud weights from measurement
glm::vec3 PFMeasurementUpdate(std::vector<float> lidar) {
glm::vec3 retv(0.0f);
if (GPU_MEASUREMENT) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// create device copy of fit array and lidar
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(dev_fit, 0, PARTICLE_COUNT * sizeof(int));
cudaDeviceSynchronize();
kernEvaluateParticles << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fit);
cudaDeviceSynchronize();
checkCUDAError("particle measurement update error");
thrust::device_vector<int> vFit(dev_fit, dev_fit + PARTICLE_COUNT);
thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end());
int rng = *result.second - *result.first;
int best = result.second - vFit.begin();
// rescale all weights
if (rng > 0) {
float f = 1 / (float)(rng);
kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fit, f, *result.first);
cudaDeviceSynchronize();
checkCUDAError("particle weight update error");
}
// only use best point for return
cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyDeviceToHost);
retv = (glm::vec3) particles[best].pos;
}
else {
int best = -128 * LIDAR_SIZE;
int worst = 128 * LIDAR_SIZE;
int iBest = 0;
int fit[PARTICLE_COUNT] = { 0 };
for (int i = 0; i < PARTICLE_COUNT; i++) {
fit[i] = EvaluateParticle(occupancyGrid, map_dim, map_params, particles[i], robotPos, &lidar[0]);
// track correlation maximums
if (fit[i] > best) {
best = fit[i];
iBest = i;
}
if (fit[i] < worst)
worst = fit[i];
}
// rescale all weights
if ((best - worst) > 0) {
float f = 1.0f;
for (int i = 0; i < PARTICLE_COUNT; i++) {
f = (float)(fit[i] - worst) / (float)(best - worst);
particles[i].w *= f;
}
}
retv = (glm::vec3) particles[iBest].pos;
}
return retv;
}
// add noise to a single particle
__device__ __host__ void ParticleAddNoise(Particle &particle, int frame, int idx)
{
float mean[3] = { 0 };
float cov[3] = COV; // covariance: x y theta
thrust::default_random_engine e2 = makeSeededRandomEngine(frame, idx, 0);
thrust::random::normal_distribution<float> distx(mean[0], cov[0]);
thrust::random::normal_distribution<float> disty(mean[1], cov[1]);
thrust::random::normal_distribution<float> distt(mean[2], cov[2]);
glm::vec3 noise(distx(e2), disty(e2), distt(e2));
particle.pos += noise;
}
// kernel wrapper for adding noise to a particle
__global__ void kernAddNoise(Particle *particles, int frame)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
ParticleAddNoise(particles[i], frame, i);
}
}
// perform a motion update on the particle cloud, adding in gaussian noise
void PFMotionUpdate(int frame) {
if (GPU_MOTION) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// sync up host and device arrays for now...
cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyHostToDevice);
kernAddNoise << <blocksPerGrid1d, blockSize1d >> >(dev_particles, frame);
cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
checkCUDAError("particle motion update error");
} else {
for (int i = 0; i < PARTICLE_COUNT; i++)
ParticleAddNoise(particles[i], frame, i);
}
}
__global__ void kernCopyWeights(Particle *particles, float *weights, bool squared)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
weights[i] = (squared) ? particles[i].w * particles[i].w : particles[i].w;
}
}
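// Multinomial resampling: `weights` holds the inclusive prefix sum (CDF) of the particle weights,
// so drawing r ~ U(0, max) and taking the first index whose cumulative weight exceeds r picks
// particle i with probability w_i / sum(w). The linear scan is O(PARTICLE_COUNT) per thread; a
// binary search over the prefix sum (upper_bound style) would be a straightforward optimization
// if this kernel ever shows up in profiles.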
__global__ void kernWeightedSample(Particle *particles, float *weights, float max, float Neff, int frame)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, i);
thrust::random::uniform_real_distribution<float> dist(0, max);
int idx = 0;
float rnd = dist(gen);
while (idx < PARTICLE_COUNT && rnd > weights[idx]) idx++;
particles[i] = particles[idx];
particles[i].w = 1.0f;
}
}
// check if particles need to be resampled
void PFResample(int frame) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
float r = 0, r2 = 0;
if (GPU_RESAMPLE) {
kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, true);
cudaDeviceSynchronize();
thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
r2 = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);
kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, false);
cudaDeviceSynchronize();
r = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);
}
else {
for (int i = 0; i < PARTICLE_COUNT; i++) {
r += particles[i].w;
r2 += (particles[i].w) * (particles[i].w);
}
}
float Neff = r * r / r2;
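// Effective sample size: Neff = (sum w)^2 / (sum w^2). With uniform weights Neff equals
// PARTICLE_COUNT; as a few particles dominate it drops toward 1, and the cloud is resampled once
// it falls below EFFECTIVE_PARTICLES * PARTICLE_COUNT.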
if (Neff < EFFECTIVE_PARTICLES*PARTICLE_COUNT) {
if (GPU_RESAMPLE) {
thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
thrust::inclusive_scan(pWeights, pWeights + PARTICLE_COUNT, pWeights);
float max;
cudaMemcpy(&max, &dev_weights[PARTICLE_COUNT - 1], sizeof(float), cudaMemcpyDeviceToHost);
kernWeightedSample << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, max, Neff, frame);
cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyDeviceToHost);
checkCUDAError("resample error");
}
else {
float weightsum[PARTICLE_COUNT];
weightsum[0] = particles[0].w;
for (int i = 1; i < PARTICLE_COUNT; i++) {
weightsum[i] = weightsum[i - 1] + particles[i].w;
}
thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, 0);
thrust::random::uniform_real_distribution<float> dist(0, weightsum[PARTICLE_COUNT - 1]);
for (int i = 0; i < PARTICLE_COUNT; i++) {
int idx = 0;
float rnd = dist(gen);
while (idx < PARTICLE_COUNT && rnd > weightsum[idx]) idx++;
particles[i] = particles[idx];
particles[i].w = 1.0f;
}
// push particles to GPU to draw
cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(Particle), cudaMemcpyHostToDevice);
}
}
}
__global__ void kernUpdateMap(int N, MAP_TYPE *map, bool *mask, int val)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;
if (mask[i])
map[i] = CLAMP(map[i] + val, -clamp_val, clamp_val);
}
}
__global__ void kernGetWalls(float *lidar, glm::ivec2 center, float theta, bool *freeCells, bool *wallCells, glm::ivec2 map_dim, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
glm::vec2 walls;
//ego centric scan
CleanLidarScan(i, lidar[i], theta, walls);
// this will discard random bad data from sensor that was causing overflow errors
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x = round(walls.x / map_params.resolution.x);
walls.y = round(walls.y / map_params.resolution.y);
// center to robot pos in current map
walls += (glm::vec2) center;
// from here we need to check the wall bounds, determine if it needs to update multiple maps, and create a new patch if necessary.
traceRay(center, walls, map_dim, freeCells);
if (walls.x >= 0 && walls.x < map_dim.x && walls.y >= 0 && walls.y < map_dim.y) {
wallCells[(int)(walls.x * map_dim.x + walls.y)] = true;
}
}
}
}
void PFUpdateMap(std::vector<float> lidar) {
glm::ivec2 center_idx(
round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;
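// clamp_val keeps the per-cell confidence counters inside the signed range of MAP_TYPE with some
// headroom: for an 8-bit signed type this evaluates to (1 << 7) - 15 = 113, so adding
// OCCUPIED_WEIGHT or FREE_WEIGHT to an already-clamped cell cannot overflow. (This assumes
// MAP_TYPE is a narrow signed integer type; a 32-bit type would overflow the 1 << ... shift as
// written.)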
if (GPU_MAP) {
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
cudaMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
// Update free/occupied weights
kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, FREE_WEIGHT);
kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_wallCells, OCCUPIED_WEIGHT);
}
else {
// find occupancy grid cells from translated lidar
bool *freeCells = new bool[map_dim.x * map_dim.y];
memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
// find intersections from lidar scan
glm::vec2 walls[LIDAR_SIZE];
for (int i = 0; i < LIDAR_SIZE; i++) {
CleanLidarScan(i, lidar[i], robotPos.z, walls[i]);
walls[i].x = round(walls[i].x / map_params.resolution.x);
walls[i].y = round(walls[i].y / map_params.resolution.y);
walls[i] += center_idx;
if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
traceRay(center_idx, walls[i], map_dim, freeCells);
}
}
// downweight free cells
for (int i = 0; i < map_dim.x; i++) {
for (int j = 0; j < map_dim.y; j++) {
int idx = i*map_dim.x + j;
if (freeCells[idx]) {
occupancyGrid[idx] += FREE_WEIGHT;
occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
}
}
}
// upweight occupied cells
for (int i = 0; i < LIDAR_SIZE; i++) {
if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
int idx = (int)walls[i].x * map_dim.x + (int)walls[i].y;
occupancyGrid[idx] += OCCUPIED_WEIGHT;
occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
}
}
// push grid to GPU to draw
cudaMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), cudaMemcpyHostToDevice);
delete[] freeCells;
}
}
void CreateNode(unsigned int i) {
// create node at current position
Node temp;
temp.pos = (glm::vec2) robotPos;
temp.dist = 0.0f;
float edgeLen = glm::distance(temp.pos, clusters[i].nodes[clusters[i].nodeIdx].pos);
// update distance for all current nodes
for (int j = 0; j < clusters[i].nodes.size(); j++) {
clusters[i].nodes[j].dist += edgeLen;
}
clusters[i].nodes.push_back(temp);
// add edge from new node to last node
std::vector<unsigned int> edge;
edge.push_back(clusters[i].nodeIdx);
clusters[i].edges.push_back(edge);
// add edge from last node to new node
clusters[i].edges[clusters[i].nodeIdx].push_back(clusters[i].nodes.size() - 1);
// update current node
clusters[i].nodeIdx = clusters[i].nodes.size() - 1;
}
// This returns true if it can see any walls
__global__ void CheckVisibility(int N, MAP_TYPE *map, bool *mask, unsigned int *retv)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
if (mask[i] )
*retv += (map[i] > WALL_CONFIDENCE) ? 1 : 0;
}
}
int FindWalls(int clusterID, int nodeID) {
unsigned int *dev_retv;
bool *freeCells = new bool[map_dim.x * map_dim.y];
cudaMalloc((void**)&dev_retv, sizeof(unsigned int));
cudaMemset(dev_retv, 0, sizeof(unsigned int));
memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
glm::ivec2 ai(
round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
glm::ivec2 bi(
round(0.5f * map_dim.x + clusters[clusterID].nodes[nodeID].pos.x / map_params.resolution.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + clusters[clusterID].nodes[nodeID].pos.y / map_params.resolution.y + map_params.resolution.y / 2)
);
traceRay(ai, bi, map_dim, freeCells);
cudaMemcpy(dev_freeCells, freeCells, map_dim.x * map_dim.y*sizeof(bool), cudaMemcpyHostToDevice);
const int blockSize1d = 128;
const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d);
CheckVisibility << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, dev_retv);
cudaDeviceSynchronize();
int nWalls;
cudaMemcpy(&nWalls, dev_retv, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_retv);
delete[] freeCells;
return nWalls;
}
void UpdateTopology() {
for (int i = 0; i < clusters.size(); i++) {
bool newNode = true;
// check if we need a new node on topology graph for each cluster (this is fast on CPU)
// we could possibly improve performance here by not recalculating the distance every step, only checking relative to the distance the robot has moved.
for (int j = 0; j < clusters[i].nodes.size(); j++)
newNode &= (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) > MAX_NODE_DIST);
if (newNode) {
CreateNode(i);
printf("new node from distance. number of graph nodes: %i\n", clusters[i].nodes.size());
}
// if we don't need a new node for distance, check if we need one from visibility
//if (!newNode) { // run this on GPU to prevent sending the maps back and forth, this operation can be slow even for a small graph
// newNode = true;
// // 1D block for particles
// for (int j = 0; j < clusters[i].nodes.size(); j++) {
// int nWalls = FindWalls(i, j);
// //if (nWalls > 0) printf("found %i walls for node %i\n", nWalls, j);
// newNode &= (nWalls >= MIN_WALL_COUNT);// && (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) > MIN_NODE_DIST);
// }
//
// if (newNode) {
// CreateNode(i);
// printf("new node from visibility. number of graph nodes: %i\n", clusters[i].nodes.size());
// }
//}
}
}
__global__ void AssignParticlesToCluster(int N, Particle *particles) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
// find the closest visible node
}
}
void CheckLoopClosure() {
for (int i = 0; i < clusters.size(); i++) {
for (int j = 0; j < clusters[i].nodes.size(); j++) { // check each node for loop closure conditions
if (glm::distance((glm::vec2) robotPos, clusters[i].nodes[j].pos) < CLOSURE_MAP_DIST) {
float edgeLen = glm::distance((glm::vec2) robotPos, clusters[i].nodes[clusters[i].nodeIdx].pos);
if (edgeLen + clusters[i].nodes[j].dist > CLOSURE_GRAPH_DIST) {
//printf("potential loop closure with node %i\n", j);
// find all nodes that could separate clusters
// 1D block for particles
std::vector<int> visibleNodes;
for (int k = 0; k < clusters[i].nodes.size(); k++) {
int nWalls = FindWalls(i, k);
if (nWalls < MIN_WALL_COUNT) {
visibleNodes.push_back(k);
}
}
// create new clusters for each group of visible nodes
for (int k = 0; k < visibleNodes.size(); k++) {
for (int l = 0; l < clusters.size(); l++) {
std::vector<unsigned int> v = clusters[l].edges[visibleNodes[k]]; // only create new cluster if no clusters have an edge between visible and current nodes
bool createCluster = (std::find(v.begin(), v.end(), clusters[i].nodeIdx) != v.end());
if (createCluster) {
// copy cluster and get a new ID for it
Cluster newCluster = clusters[i];
newCluster.id = clusters.size(); // this will be wrong when we start deleting obsolete clusters
// add edges
newCluster.edges[clusters[i].nodeIdx].push_back(visibleNodes[k]);
newCluster.edges[visibleNodes[k]].push_back(clusters[i].nodeIdx);
// update graph distances for all nodes in cluster
//clusters.push_back(newCluster);
}
}
}
// parse all particles into correct cluster
//const int blockSize1d = 128;
//const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
//AssignParticlesToCluster << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles);
// prune unused clusters
printf("now contains %i clusters\n", clusters.size());
}
}
}
}
}
void drawMap(uchar4 *pbo) {
drawAll(pbo, PARTICLE_COUNT, hst_scene, dev_image, robotPos, dev_particles, dev_occupancyGrid, dev_maps, clusters);
checkCUDAError("draw screen error");
}
void getPCData(Particle **ptrParticles, MAP_TYPE **ptrMap, KDTree::Node **ptrKD, int *nParticles, int *nKD, glm::vec3 &pos) {
// copy map to host so PCL can draw it
cudaMemcpy(occupancyGrid, dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), cudaMemcpyDeviceToHost);
*ptrParticles = particles;
*ptrMap = occupancyGrid;
*nParticles = PARTICLE_COUNT;
*ptrKD = kd;
*nKD = kdSize;
pos = robotPos;
}
/**
* Begin ICP code.
*/
__host__ __device__ bool sortFuncX(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.x < p2.x;
}
__host__ __device__ bool sortFuncY(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.y < p2.y;
}
__host__ __device__ bool sortFuncZ(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.z < p2.z;
}
__global__ void transformPoint(int N, glm::vec4 *points, glm::mat4 transform) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
points[index] = glm::vec4(glm::vec3(transform * glm::vec4(glm::vec3(points[index]), 1)), 1);
}
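// getHyperplaneDist returns the axis-aligned distance from pt1 to the node's splitting plane
// along `axis`, and writes into *branch which side of the plane the query falls on (true sends
// the descent to the left child). Both kd-tree traversals below use it to pick a child during
// descent and to decide whether the far subtree could still contain a closer point.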
__device__ float getHyperplaneDist(const glm::vec4 *pt1, const glm::vec4 *pt2, int axis, bool *branch)
{
float retv = 0.0f;
if (axis == 0) {
*branch = sortFuncX(*pt1, *pt2);
retv = abs(pt1->x - pt2->x);
}
if (axis == 1) {
*branch = sortFuncY(*pt1, *pt2);
retv = abs(pt1->y - pt2->y);
}
if (axis == 2) {
*branch = sortFuncZ(*pt1, *pt2);
retv = abs(pt1->z - pt2->z);
}
return retv;
}
__global__ void outerProduct(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, glm::mat3 *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = glm::mat3(glm::vec3(vec1[i]) * vec2[i].x,
glm::vec3(vec1[i]) * vec2[i].y,
glm::vec3(vec1[i] * vec2[i].z));
}
__global__ void findCorrespondenceKD(int N, glm::vec4 *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = tree[bestIdx].value;
}
__global__ void findCorrespondenceIndexKD(int N, int *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = bestIdx;
}
__global__ void kernGetWallsKD(float *lidar, glm::vec3 robotPos, glm::vec4 *nodeVal, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
//ego centric scan
glm::vec2 walls;
CleanLidarScan(i, lidar[i], robotPos.z, walls);
// this will discard random bad data from sensor that was causing overflow errors
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
// world-space wall point for this beam (ROUND_FRAC(value, resolution) could be applied here to
// snap onto the map grid, as PFUpdateMapKD does for its point lists)
nodeVal[i].x = robotPos.x + walls.x;
nodeVal[i].y = robotPos.y + walls.y;
nodeVal[i].z = 0.0f;
nodeVal[i].w = OCCUPIED_WEIGHT;
}
}
}
glm::vec3 transformPointICP(glm::vec3 start, std::vector<float> lidar) {
int sizeTarget = LIDAR_SIZE;
glm::vec3 retv = start;
dim3 fullBlocksPerGrid((LIDAR_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE);
// find the closest point in the scene for each point in the target
glm::vec4 *dev_cor, *tar_c, *cor_c, *dev_target;
glm::mat3 *dev_W;
cudaMalloc((void**)&dev_cor, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&tar_c, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&cor_c, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&dev_target, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&dev_W, sizeTarget * sizeof(glm::mat3));
cudaMemset(dev_W, 0, sizeTarget * sizeof(glm::mat3));
// find intersections from lidar scan
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
//kernGetWalls << <fullBlocksPerGrid, BLOCK_SIZE >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
kernGetWallsKD << <fullBlocksPerGrid, BLOCK_SIZE >> >(dev_lidar, robotPos, dev_target, map_params);
findCorrespondenceKD << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, dev_cor, dev_target, dev_kd);
cudaDeviceSynchronize();
// Calculate mean-centered correspondences
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
thrust::device_ptr<glm::vec4> ptr_target(dev_target);
thrust::device_ptr<glm::vec4> ptr_scene(dev_target);
thrust::device_ptr<glm::vec4> ptr_cor(dev_cor);
mu_tar = glm::vec3(thrust::reduce(ptr_target, ptr_target + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_cor = glm::vec3(thrust::reduce(ptr_cor, ptr_cor + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
cudaMemcpy(tar_c, dev_target, sizeTarget*sizeof(glm::vec4), cudaMemcpyDeviceToDevice);
cudaMemcpy(cor_c, dev_cor, sizeTarget*sizeof(glm::vec4), cudaMemcpyDeviceToDevice);
checkCUDAError("mean centered calculation failed!");
// move the point cloud with translation
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 center_tar = utilityCore::buildTransformationMatrix(-mu_tar, r, s);
glm::mat4 center_cor = utilityCore::buildTransformationMatrix(-mu_cor, r, s);
transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, tar_c, center_tar);
transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, cor_c, center_cor);
checkCUDAError("mean centered transformation failed!");
cudaDeviceSynchronize();
// Calculate W
outerProduct << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, tar_c, cor_c, dev_W);
thrust::device_ptr<glm::mat3> ptr_W(dev_W);
glm::mat3 W = thrust::reduce(ptr_W, ptr_W + sizeTarget, glm::mat3(0));
checkCUDAError("outer product failed!");
cudaDeviceSynchronize();
// calculate SVD of W
glm::mat3 U, S, V;
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R = g_U * g_Vt;
glm::vec3 t = glm::vec3(mu_cor) - R*glm::vec3(mu_tar);
// update target points
//glm::mat4 transform = glm::translate(glm::mat4(), t) * glm::mat4(R);
//transformPoint << <fullBlocksPerGrid, BLOCK_SIZE >> >(sizeTarget, dev_target, transform);
// make a massive assumption that the SVD will already result in a 2d rotation around the z-axis
//glm::vec4 newPoint(start.x, start.y, 0.0f, 0.0f);
//newPoint = transform*newPoint;
float theta = asin(R[0][1]);
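	// Note: asin only covers [-pi/2, pi/2]. Under the same pure-z-rotation assumption made
	// above (R[0][0] = cos(theta), R[0][1] = sin(theta)), atan2(R[0][1], R[0][0]) would
	// recover theta over the full (-pi, pi] range.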
retv.x += t.x;
retv.y += t.y;
retv.z += theta;
cudaFree(dev_cor);
cudaFree(tar_c);
cudaFree(cor_c);
cudaFree(dev_W);
cudaFree(dev_target);
return retv;
}
void particleFilterInitPC() {
// KD tree data
cudaMalloc((void**)&dev_dist, LIDAR_SIZE * sizeof(float));
checkCUDAError("cudaMalloc dev_dist failed!");
cudaMalloc((void**)&dev_pair, LIDAR_SIZE * sizeof(int));
checkCUDAError("cudaMalloc dev_pair failed!");
cudaMalloc((void**)&dev_kd, KD_MAX_SIZE * sizeof(KDTree::Node));
checkCUDAError("cudaMalloc dev_kd failed!");
cudaMalloc((void**)&dev_fitf, PARTICLE_COUNT * sizeof(float));
checkCUDAError("cudaMalloc dev_fitf failed!");
cudaDeviceSynchronize();
checkCUDAError("particleFilterInitPC");
}
void particleFilterFreePC() {
cudaFree(dev_dist);
cudaFree(dev_pair);
cudaFree(dev_kd);
cudaFree(dev_fitf);
checkCUDAError("particleFilterFreePC");
}
__device__ float weight[LIDAR_SIZE];
__device__ float particlePos[3];
__device__ float atmWeight[PARTICLE_COUNT*LIDAR_SIZE];
__global__ void kernFindWallCorrespondance(float *lidar, KDTree::Node *kdTree, int nParticle) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < LIDAR_SIZE) {
glm::vec2 walls;
glm::vec4 pt;
CleanLidarScan(i, lidar[i], particlePos[2], walls);
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x += particlePos[0];
walls.y += particlePos[1];
pt.x = walls.x;
pt.y = walls.y;
pt.z = 0.0f;
pt.w = 0.0f;
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(kdTree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
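			// Iterative nearest-neighbour search over the k-d tree: descend the tree choosing
			// the child on the query point's side of each splitting hyperplane while tracking
			// the closest node seen so far, then back off to the best node's parent and explore
			// the far side only if its hyperplane distance could still beat the current best.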
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = kdTree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = kdTree[kdTree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
atmWeight[LIDAR_SIZE*nParticle + i] = kdTree[bestIdx].value.w;
//atomicAdd(&atmWeight[nParticle], kdTree[bestIdx].value.w);
}
else {
atmWeight[LIDAR_SIZE*nParticle + i] = 0;
}
}
}
#define DYNAMIC_KERN 0
__device__ float EvaluateParticleKD(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle &particle, glm::vec3 pos, float *lidar, KDTree::Node *kdTree, int kdSize, int nParticle)
{
// get walls relative to robot position, add particle position
glm::vec2 walls;
glm::vec4 pt;
int nValid = 0;
glm::vec3 mu_tar(0.0f);
glm::vec3 mu_cor(0.0f);
float retv = 0.0f;
#if (DYNAMIC_KERN == 1)
// try launching dynamic kernel
// 1D block for LIDAR
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
particlePos[0] = particle.pos.x;
particlePos[1] = particle.pos.y;
particlePos[2] = particle.pos.z;
atmWeight[nParticle] = 0.0f;
kernFindWallCorrespondance << <blocksPerGrid1d, blockSize1d >> >(lidar, kdTree, nParticle);
__syncthreads();
#endif
// get walls and find correspondence
for (int j = 0; j < LIDAR_SIZE; j++) {
#if (DYNAMIC_KERN == 0)
CleanLidarScan(j, lidar[j], particle.pos.z, walls);
if (abs(walls.x) < LIDAR_RANGE && abs(walls.y) < LIDAR_RANGE) {
walls.x += particle.pos.x;
walls.y += particle.pos.y;
pt.x = walls.x;
pt.y = walls.y;
pt.z = 0.0f;
pt.w = 0.0f;
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(kdTree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = kdTree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = kdTree[kdTree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
mu_tar += (glm::vec3) pt;
mu_cor += (glm::vec3) kdTree[bestIdx].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y) * 2;
//if (glm::distance((glm::vec3) pt, (glm::vec3) kdTree[bestIdx].value) < minDist) {
retv += kdTree[bestIdx].value.w;
//}
}
#else
retv += atmWeight[LIDAR_SIZE*nParticle + j];
#endif
}
//printf("matches found: %i %.4f\n", nValid, retv);
//mu_tar /= nValid;
//mu_cor /= nValid;
return retv;
}
// kernel wrapper for calling Evaluate Particle
__global__ void kernEvaluateParticlesKD(MAP_TYPE *map, glm::ivec2 map_dim, Patch map_params, Particle *particles, glm::vec3 pos, float *lidar, float *fit, KDTree::Node *kdTree, int kdSize)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < PARTICLE_COUNT) {
fit[i] = EvaluateParticleKD(map, map_dim, map_params, particles[i], pos, lidar, kdTree, kdSize, i);
}
}
// update particle cloud weights from measurement
glm::vec3 PFMeasurementUpdateKD(std::vector<float> lidar) {
glm::vec3 retv(0.0f);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);
// create device copy of fit array and lidar
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(dev_fitf, 0, PARTICLE_COUNT * sizeof(float));
cudaDeviceSynchronize();
kernEvaluateParticlesKD << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fitf, dev_kd, kdSize);
cudaDeviceSynchronize();
checkCUDAError("particle measurement kd tree update error");
thrust::device_vector<float> vFit(dev_fitf, dev_fitf + PARTICLE_COUNT);
thrust::pair<thrust::device_vector<float>::iterator, thrust::device_vector<float>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end());
float rng = *result.second - *result.first;
int best = result.second - vFit.begin();
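	// thrust::minmax_element returns a (min, max) iterator pair; subtracting begin() from the
	// max iterator gives the index of the highest-scoring particle, which is refined with ICP
	// at the end of this function.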
// rescale all weights
if (rng > 0.0f) {
float f = 1 / rng;
kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fitf, f, *result.first);
cudaDeviceSynchronize();
checkCUDAError("particle weight kdtree update error");
}
// only use best point for return
cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyDeviceToHost);
//retv = (glm::vec3) particles[best].pos;
// run ICP on final point only
retv = transformPointICP((glm::vec3) particles[best].pos, lidar);
return retv;
}
__global__ void kernUpdateMapKD(int N, KDTree::Node* tree, glm::vec4 *target, int *indexList, int val, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
long clamp_val = (1 << (sizeof(MAP_TYPE) * 8 - 1)) - 15;
glm::vec3 a = (glm::vec3) target[i];
glm::vec3 b = (glm::vec3) tree[indexList[i]].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y);
if (glm::distance(a, b) < minDist) {
tree[indexList[i]].value.w = CLAMP(tree[indexList[i]].value.w + val, -clamp_val, clamp_val);
}
}
}
__global__ void kernTestCorrespondance(int N, KDTree::Node* tree, glm::vec4 *target, int *indexList, bool *diff, Patch map_params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
glm::vec3 a = (glm::vec3) target[i];
glm::vec3 b = (glm::vec3) tree[indexList[i]].value;
float minDist = sqrt(map_params.resolution.x*map_params.resolution.x + map_params.resolution.y*map_params.resolution.y) / 2.0f;
diff[i] = (glm::distance(a, b) > minDist);
}
}
__global__ void kernGeneratePosArray(glm::vec4 *out, glm::vec3 pos, glm::ivec2 dim, Patch params)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < dim.x*dim.y) {
int x = i / dim.x;
int y = i % dim.x;
out[i].x = x * params.resolution.x - params.scale.x / 2.0f + pos.x;
out[i].y = y * params.resolution.y - params.scale.y / 2.0f + pos.y;
out[i].x = ROUND_FRAC(out[i].x, params.resolution.x);
out[i].y = ROUND_FRAC(out[i].y, params.resolution.y);
out[i].z = 0.0f;
}
}
struct is_true
{
__host__ __device__
bool operator()(const bool x)
{
return x == true;
}
};
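// is_true is the predicate for stencil-based stream compaction; a sketch of the intended use
// (mirroring the commented-out code in PFUpdateMapKD_SLOW below):
//   thrust::device_ptr<glm::vec4> ptr_pos(dev_pos);
//   int nWalls = thrust::copy_if(thrust::device, ptr_pos, ptr_pos + map_dim.x*map_dim.y,
//                                dev_wallCells, dev_walls_pc, is_true()) - dev_walls_pc;
// i.e. a cell position is copied to the output wherever the corresponding wall flag is set.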
void PFUpdateMapKD(std::vector<float> lidar) {
// get local free and occupied grid cells here. Use these values to update pointcloud
glm::ivec2 center_idx(
round(0.5f * map_dim.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + map_params.resolution.y / 2)
);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
cudaMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
bool *wallCells = new bool[map_dim.x * map_dim.y];
bool *freeCells = new bool[map_dim.x * map_dim.y];
std::vector<glm::vec4> wallPC;
std::vector<glm::vec4> freePC;
cudaMemcpy(wallCells, dev_wallCells, map_dim.x * map_dim.y * sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemcpy(freeCells, dev_freeCells, map_dim.x * map_dim.y * sizeof(bool), cudaMemcpyDeviceToHost);
// Create Pointclouds here
// parallelize through compactions and summation
for (int x = 0; x < map_dim.x; x++) {
for (int y = 0; y < map_dim.y; y++) {
int idx = (x * map_dim.x) + y;
if (wallCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
wallPC.push_back(point);
}
if (freeCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
freePC.push_back(point);
}
}
}
if (kdSize > 0) {
// downweight existing wall cells if in freeCells
const dim3 blocksPerGridFree((freePC.size() + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridWall((wallPC.size() + blockSize1d - 1) / blockSize1d);
glm::vec4 *dev_walls, *dev_free;
int *dev_walls_c, *dev_free_c;
cudaMalloc((void**)&dev_walls, wallPC.size()*sizeof(glm::vec4));
cudaMalloc((void**)&dev_walls_c, wallPC.size()*sizeof(int));
cudaMalloc((void**)&dev_free, freePC.size()*sizeof(glm::vec4));
cudaMalloc((void**)&dev_free_c, freePC.size()*sizeof(int));
cudaMemcpy(dev_free, &freePC[0], freePC.size()*sizeof(glm::vec4), cudaMemcpyHostToDevice);
cudaMemcpy(dev_walls, &wallPC[0], wallPC.size()*sizeof(glm::vec4), cudaMemcpyHostToDevice);
findCorrespondenceIndexKD << <blocksPerGridFree, BLOCK_SIZE >> >(freePC.size(), dev_free_c, dev_free, dev_kd);
findCorrespondenceIndexKD << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_walls_c, dev_walls, dev_kd);
cudaDeviceSynchronize();
checkCUDAError("map update - correspondance failure");
bool *wallsCreate = new bool[wallPC.size()];
bool *dev_wallsCreate = NULL;
cudaMalloc((void**)&dev_wallsCreate, wallPC.size()*sizeof(bool));
//cudaMemcpy(free_c, &freePC[0], freePC.size()*sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(walls_c, &wallPC[0], wallPC.size()*sizeof(int), cudaMemcpyHostToDevice);
// downweight free cells
kernUpdateMapKD << <blocksPerGridFree, BLOCK_SIZE >> >(freePC.size(), dev_kd, dev_free, dev_free_c, FREE_WEIGHT, map_params);
// upweight existing wall cells
kernUpdateMapKD << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_kd, dev_walls, dev_walls_c, OCCUPIED_WEIGHT, map_params);
cudaDeviceSynchronize();
// insert any new wall cells
kernTestCorrespondance << <blocksPerGridWall, BLOCK_SIZE >> >(wallPC.size(), dev_kd, dev_walls, dev_walls_c, dev_wallsCreate, map_params);
cudaMemcpy(wallsCreate, dev_wallsCreate, wallPC.size()*sizeof(bool), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
int nInsert = 0;
for (int i = 0; i < wallPC.size(); i++) {
if (wallsCreate[i]) nInsert++;
}
// we dont want to recreate the kd tree every time, we just want to maintain copies on both the host and device...
if (nInsert > 0) {
cudaMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), cudaMemcpyDeviceToHost); // make sure to copy new weight values
for (int i = 0; i < wallPC.size(); i++) {
if (wallsCreate[i]) {
wallPC[i].w = -100;
KDTree::InsertNode(wallPC[i], kd, kdSize++);
}
}
//printf("new pointcloud size: %i\n", kdSize);
cudaMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), cudaMemcpyHostToDevice);
}
checkCUDAError("map update - insert failure");
cudaFree(dev_walls);
cudaFree(dev_walls_c);
cudaFree(dev_free);
cudaFree(dev_free_c);
cudaFree(dev_wallsCreate);
delete[] wallsCreate;
} else { // create a new kd tree from first scan
KDTree::Create(wallPC, kd);
cudaMemcpy(dev_kd, kd, wallPC.size()*sizeof(KDTree::Node), cudaMemcpyHostToDevice);
kdSize += wallPC.size();
}
delete[] wallCells;
delete[] freeCells;
}
void PFUpdateMapKD_SLOW(std::vector<float> lidar) {
// get local free and occupied grid cells here. Use these values to update pointcloud
glm::ivec2 center_idx(
round(0.5f * map_dim.x + map_params.resolution.x / 2),
round(0.5f * map_dim.y + map_params.resolution.y / 2)
);
// 1D block for particles
const int blockSize1d = 128;
const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
// find occupancy grid cells from translated lidar
cudaMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
// find intersections from lidar scan
kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);
//// create index list
//glm::vec4 *dev_pos, *dev_walls_pc, *dev_free_pc;
//cudaMalloc((void**)&dev_pos, map_dim.x*map_dim.y*sizeof(glm::vec4));
//cudaMalloc((void**)&dev_walls_pc, map_dim.x*map_dim.y*sizeof(glm::vec4));
//cudaMalloc((void**)&dev_free_pc, map_dim.x*map_dim.y*sizeof(glm::vec4));
//const dim3 blocksPerGridMap((map_dim.x*map_dim.y + blockSize1d - 1) / blockSize1d);
//kernGeneratePosArray << < blocksPerGridMap, blockSize1d >> >(dev_pos, robotPos, map_dim, map_params);
//thrust::device_ptr<glm::vec4> ptr_pos = thrust::device_pointer_cast(dev_pos);
//int nWalls = thrust::copy_if(thrust::device, ptr_pos, ptr_pos + map_dim.x*map_dim.y, dev_wallCells, dev_walls_pc, is_true()) - dev_walls_pc;
//int nFree = thrust::copy_if(thrust::device, ptr_pos, ptr_pos + map_dim.x*map_dim.y, dev_freeCells, dev_free_pc, is_true()) - dev_free_pc;
bool *wallCells = new bool[map_dim.x * map_dim.y];
bool *freeCells = new bool[map_dim.x * map_dim.y];
std::vector<glm::vec4> wallPC;
std::vector<glm::vec4> freePC;
cudaMemcpy(wallCells, dev_wallCells, map_dim.x * map_dim.y * sizeof(bool), cudaMemcpyDeviceToHost);
cudaMemcpy(freeCells, dev_freeCells, map_dim.x * map_dim.y * sizeof(bool), cudaMemcpyDeviceToHost);
// Create Pointclouds here
// parallelize through compactions and summation
for (int x = 0; x < map_dim.x; x++) {
for (int y = 0; y < map_dim.y; y++) {
int idx = (x * map_dim.x) + y;
if (wallCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
wallPC.push_back(point);
}
if (freeCells[idx]) {
glm::vec4 point;
point.x = x * map_params.resolution.x - map_params.scale.x / 2.0f + robotPos.x;
point.y = y * map_params.resolution.y - map_params.scale.y / 2.0f + robotPos.y;
point.x = ROUND_FRAC(point.x, map_params.resolution.x);
point.y = ROUND_FRAC(point.y, map_params.resolution.y);
point.z = 0.0f;
freePC.push_back(point);
}
}
}
int nFree = freePC.size();
int nWalls = wallPC.size();
if (kdSize > 0) {
// downweight existing wall cells if in freeCells
const dim3 blocksPerGridFree((nFree + blockSize1d - 1) / blockSize1d);
const dim3 blocksPerGridWall((nWalls + blockSize1d - 1) / blockSize1d);
glm::vec4 *dev_walls_pc, *dev_free_pc;
cudaMalloc((void**)&dev_walls_pc, nWalls*sizeof(glm::vec4));
cudaMalloc((void**)&dev_free_pc, nFree*sizeof(glm::vec4));
int *dev_walls_c, *dev_free_c;
cudaMalloc((void**)&dev_walls_c, nWalls*sizeof(int));
cudaMalloc((void**)&dev_free_c, nFree*sizeof(int));
findCorrespondenceIndexKD << <blocksPerGridFree, BLOCK_SIZE >> >(nFree, dev_free_c, dev_free_pc, dev_kd);
findCorrespondenceIndexKD << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_walls_c, dev_walls_pc, dev_kd);
cudaDeviceSynchronize();
checkCUDAError("map update - correspondance failure");
bool *wallsCreate = new bool[nWalls];
bool *dev_wallsCreate = NULL;
cudaMalloc((void**)&dev_wallsCreate, nWalls*sizeof(bool));
// downweight free cells
kernUpdateMapKD << <blocksPerGridFree, BLOCK_SIZE >> >(nFree, dev_kd, dev_free_pc, dev_free_c, FREE_WEIGHT, map_params);
// upweight existing wall cells
kernUpdateMapKD << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_kd, dev_walls_pc, dev_walls_c, OCCUPIED_WEIGHT, map_params);
cudaDeviceSynchronize();
// insert any new wall cells
kernTestCorrespondance << <blocksPerGridWall, BLOCK_SIZE >> >(nWalls, dev_kd, dev_walls_pc, dev_walls_c, dev_wallsCreate, map_params);
cudaMemcpy(wallsCreate, dev_wallsCreate, nWalls*sizeof(bool), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
int nInsert = thrust::count(thrust::device, dev_wallsCreate, dev_wallsCreate + nWalls, true);
// we dont want to recreate the kd tree every time, we just want to maintain copies on both the host and device...
if (nInsert > 0) {
glm::vec4 *wallPC = new glm::vec4[nWalls];
cudaMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), cudaMemcpyDeviceToHost); // make sure to copy new weight values
cudaMemcpy(wallPC, dev_walls_pc, nWalls*sizeof(glm::vec4), cudaMemcpyDeviceToHost);
for (int i = 0; i < nWalls; i++) {
if (wallsCreate[i]) {
wallPC[i].w = -100;
KDTree::InsertNode(wallPC[i], kd, kdSize++);
}
}
//printf("new pointcloud size: %i\n", kdSize);
cudaMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), cudaMemcpyHostToDevice);
delete[] wallPC;
}
checkCUDAError("map update - insert failure");
cudaFree(dev_walls_c);
cudaFree(dev_free_c);
cudaFree(dev_wallsCreate);
cudaFree(dev_walls_pc);
cudaFree(dev_free_pc);
delete[] wallsCreate;
} else { // create a new kd tree from first scan
//std::vector<glm::vec4> wallPC;
//wallPC.reserve(nWalls);
//wallPC.push_back(glm::vec4(0.0f));
//cudaMemcpy(&wallPC[0], dev_walls_pc, nWalls*sizeof(glm::vec4), cudaMemcpyDeviceToHost);
KDTree::Create(wallPC, kd);
cudaMemcpy(dev_kd, kd, nWalls*sizeof(KDTree::Node), cudaMemcpyHostToDevice);
kdSize += wallPC.size();
}
//cudaFree(dev_pos);
//cudaFree(dev_walls_pc);
//cudaFree(dev_free_pc);
delete[] wallCells;
delete[] freeCells;
}
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void particleFilter(uchar4 *pbo, int frame, Lidar *lidar) {
if (frame % 1000 == 0)
printf("PC size: %i\n", kdSize);
if (frame % 100 == 5) {
cudaMemcpy(kd, dev_kd, kdSize*sizeof(KDTree::Node), cudaMemcpyDeviceToHost);
KDTree::Balance(kd, kdSize);
cudaMemcpy(dev_kd, kd, kdSize*sizeof(KDTree::Node), cudaMemcpyHostToDevice);
}
//special case for first scan
if (kdSize == 0) {
robotPos = glm::vec3(0.0f, 0.0f, 0.0f);
PFUpdateMapKD(lidar->scans[frame]);
}
else {
// timing metrics
if (frame % 100 == 0) {
avg_motion = 0.0f;
avg_measurement = 0.0f;
avg_map = 0.0f;
avg_sample = 0.0f;
}
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
PFMotionUpdate(frame);
end = std::chrono::system_clock::now();
avg_motion += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
robotPos = PFMeasurementUpdateKD(lidar->scans[frame]);
end = std::chrono::system_clock::now();
avg_measurement += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
PFUpdateMapKD(lidar->scans[frame]);
end = std::chrono::system_clock::now();
avg_map += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
PFResample(frame);
end = std::chrono::system_clock::now();
avg_sample += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
start = end;
//UpdateTopology();
//CheckLoopClosure();
// print timing metrics
if (frame % 100 == 0) {
cout << "Frame " << frame << ":" << endl;
printf(" motion: %3.2f\n", avg_motion / 100.0f);
printf(" measurement: %3.2f\n", avg_measurement / 100.0f);
printf(" map: %3.2f\n", avg_map / 100.0f);
printf(" resample: %3.2f\n", avg_sample / 100.0f);
}
}
} |
85802c7cd69631a8da0f74cb0c0879409f59e83a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "cutil_inline.h"
#include "kanulia.h"
#include "kanuliacalc.cu"
// Quaternion rotation
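// rotate4 applies the 4D rotation as a sequence of plane (Givens) rotations in the yz, xz,
// zw and yw planes; rotate4inv undoes it by applying the same planes in reverse order with
// negated angles.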
__device__ inline void rotate4(float *px, float *py, float *pz, float *pw, const float4 angle)
{
float t;
if (angle.x != 0. ) {
t = *py * cos(angle.x) + *pz * sin(angle.x);
*pz = - *py * sin(angle.x) + *pz * cos(angle.x);
*py = t;
};
if (angle.y != 0. ) {
t = *px * cos(angle.y) + *pz * sin(angle.y);
*pz = - *px * sin(angle.y) + *pz * cos(angle.y);
*px = t;
};
if (angle.z != 0. ) {
t = *pz * cos(angle.z) + *pw * sin(angle.z);
*pw = - *pz * sin(angle.z) + *pw * cos(angle.z);
*pz = t;
};
if (angle.w != 0. ) {
t = *py * cos(angle.w) + *pw * sin(angle.w);
*pw = - *py * sin(angle.w) + *pw * cos(angle.w);
*py = t;
};
}
__device__ inline void rotate4inv(float *px, float *py, float *pz, float *pw, const float4 angle)
{
float t;
if (angle.w != 0. ) {
t = *py * cos(-angle.w) + *pw * sin(-angle.w);
*pw = - *py * sin(-angle.w) + *pw * cos(-angle.w);
*py = t;
};
if (angle.z != 0. ) {
t = *pz * cos(-angle.z) + *pw * sin(-angle.z);
*pw = - *pz * sin(-angle.z) + *pw * cos(-angle.z);
*pz = t;
};
if (angle.y != 0. ) {
t = *px * cos(-angle.y) + *pz * sin(-angle.y);
*pz = - *px * sin(-angle.y) + *pz * cos(-angle.y);
*px = t;
};
if (angle.x != 0. ) {
t = *py * cos(-angle.x) + *pz * sin(-angle.x);
*pz = - *py * sin(-angle.x) + *pz * cos(-angle.x);
*py = t;
};
}
__device__ inline void rotate3(float *px, float *py, float *pz, const float4 angle)
{
float t;
if (angle.x != 0. ) {
t = *py * cos(angle.x) + *pz * sin(angle.x);
*pz = - *py * sin(angle.x) + *pz * cos(angle.x);
*py = t;
};
if (angle.y != 0. ) {
t = *px * cos(angle.y) + *pz * sin(angle.y);
*pz = - *px * sin(angle.y) + *pz * cos(angle.y);
*px = t;
};
if (angle.z != 0. ) {
t = *px * cos(angle.z) - *py * sin(angle.z);
*py = *px * sin(angle.z) + *py * cos(angle.z);
*px = t;
};
/* if (angle.w != 0. ) {
t = *py * cos(angle.w) + *pw * sin(angle.w);
*pw = - *py * sin(angle.w) + *pw * cos(angle.w);
*py = t;
};*/
}
// The Julia4D CUDA GPU thread function
/*
Version using software scheduling of thread blocks.
The idea here is to launch of fixed number of worker blocks to fill the
machine, and have each block loop over the available work until it is all done.
We use a counter in global memory to keep track of which blocks have been
completed. The counter is incremented atomically by each worker block.
This method can achieve higher performance when blocks take a wide range of
different times to complete.
*/
__device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch
__global__ void Julia4Drepart(uchar4 *dst, const int imageW, const int imageH,
const float4 Off, const float4 JS, const float4 angle, const float scale, const float scalei,
const float xJOff, const float yJOff, const float scaleJ,
const float xblur, const float yblur,
const unsigned int maxgropix,
const unsigned int gropix, const unsigned int bloc, const unsigned int crn,
const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const int julia, const int julia4D)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
//blockIndex++;
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while()
__syncthreads();
#endif
// if (blockIndex >= ((numBlocks/nbloc)+1)*(bloc+1)) break; // finish
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX * maxgropix + threadIdx.x * maxgropix + ((bloc * gropix) % maxgropix);
const int iy = blockDim.y * blockY * maxgropix + threadIdx.y * maxgropix + ((bloc * gropix) / maxgropix) * gropix;
int r = 0;int g = 0;int b = 0;
bool seedre = false;bool seedim = false;
if ((ix < imageW) && (iy < imageH)) {
int m = 0;
if ( (julia<32) && (ix < imageW / julia) && (iy < imageH / julia)) {
// Calculate the location
const float xPos = (float)ix * scale * julia + Off.x;
const float yPos = (float)iy * scale * julia + Off.y;
// Calculate the Mandelbrot index for the current location
if (abs(JS.x-xPos)+abs(JS.y-yPos) < 2.1 * scale * julia )
{
seedre = true;
}
if (!seedre)
{
float hue;
// m = CalcMandelbrot(xPos , yPos);
m = CalcMandel4Dcore(xPos, yPos, JS.z, JS.w, &hue);
if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b);
}
} else if (julia4D&& (julia<32) &&((imageW - ix < imageW / julia) && (iy < imageH / julia))) {
// Calculate the location
const float zPos = (float)(imageW - ix) * scalei * julia + Off.z;
const float wPos = (float)iy * scalei * julia + Off.w;
// Calculate the Mandelbrot index for the current location
if (abs(JS.z-zPos)+abs(JS.w-wPos) < 2.1 * scalei * julia )
{
seedim = true;
}
if (!seedim)
{
float hue;
// m = CalcMandelbrot(zPos , wPos);
m = CalcMandel4Dcore(JS.x, JS.y, zPos, wPos, &hue);
if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b);
}
} else {
// Calculate the location
const float xPos = (float)ix * scaleJ + xJOff;
const float yPos = (float)iy * scaleJ + yJOff;
/* const float zPos = (float)0.;
const float wPos = (float)0.;*/
// Calculate the Mandelbrot index for the current location
if (julia4D == JULIA2D)
{
m = CalcJulia(xPos, yPos, JS, crn);
}
if (julia4D == CLOUDJULIA)
{
float dist = 6.0;
float step = 0.009;
float ox = (float)ix * scaleJ + xJOff;
float oy = (float)iy * scaleJ + yJOff;
float oz = - 3.0;
float ow = 0.0;
float dx = sin( 0.7 * step * ( (float)ix + xblur - (imageW/2.)) / ((float) imageW) );
float dy = sin( 0.7 * step * ( (float)iy + yblur - (imageH/2.)) / ((float) imageW) );
float dz = step;
float dw = 0.;
rotate4(&ox,&oy,&oz,&ow,angle);
rotate4(&dx,&dy,&dz,&dw,angle);
int nb = (dist/step);
m = CloudJulia4D(ox,oy,oz,ow,JS,dx,dy,dz,dw,&r,&g,&b,nb,crn);
}
if (julia4D & JULIA4D)
{
/* if ((julia4D & CROSSEYE)&&
( (sqrt( (float)((ix- imageW/4)*(ix- imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.) // if inside the crosshair marker
||(sqrt( (float)((ix-3*imageW/4)*(ix-3*imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.)))
{
r = 255;
g = 255;
b = 255;
}
else*/
m = SolidJulia4D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn,julia4D);
// m = SolidMandelBox3D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn);
}
}
// m = blockIdx.x; // uncomment to see scheduling order
// Convert the Mandelbrot index into a color
uchar4 color;
// m = m > 0 ? crn - m : 0;
if ((julia4D)&&((ix >= imageW / julia) || (iy >= imageH / julia))) {
color.x = r;
color.y = g;
color.z = b;
} else
{
if (seedim||seedre)
{
color.x = 150;
color.y = 250;
color.z = 250;
} else {
color.x = r;
color.y = g;
color.z = b;
/* if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}*/
}
}
// enable to visualize the progressive (coarse-to-fine) computation
// if (gropix==1) color.z += 120;
// if (gropix==2) color.y += 120;
// if (gropix==4) color.x += 120;
//
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
if (gropix==1)
dst[pixel] = color;
else
for (int i=0;i<gropix;i++) for (int j=0;j<gropix;j++)
if ((ix+i<imageW)&&(iy+j<imageH))
dst[pixel+i+imageW*j] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Julia4D0
// The host CPU Mandelbrot thread spawner
void RunJulia4Drepart(uchar4 *dst, const int imageW, const int imageH,
const float4 Off,
const float4 JS,
const float4 angle,
const double scale, const double scalei,
const double xJOff, const double yJOff, const double scaleJ,
const float xblur, const float yblur,
const unsigned int maxgropix,
const unsigned int gropix, const unsigned int bloc, const unsigned int crn,
const uchar4 colors, const int frame, const int animationFrame, const int numSMs, const int julia, const int julia4D)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW/maxgropix, BLOCKDIM_X), iDivUp(imageH/(maxgropix), BLOCKDIM_Y));
// zero block counter
// unsigned int hBlockCounter = (((grid.x)*(grid.y)/nbloc)+1)*(bloc);
unsigned int hBlockCounter = 0;
/*cutilSafeCall( */hipMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, hipMemcpyHostToDevice /*)*/ );
int numWorkUnit = numSMs;
hipLaunchKernelGGL(( Julia4Drepart), dim3(numWorkUnit), dim3(threads), 0, 0, dst, imageW, imageH,
Off, JS, angle, (float)scale, (float)scalei,
(float)xJOff, (float)yJOff, (float)scaleJ,
xblur, yblur,
maxgropix, gropix, bloc, crn,
colors, frame, animationFrame, grid.x, (grid.x)*(grid.y), julia, julia4D);
// cutilCheckMsg("Julia4D0_sm13 kernel execution failed.\n");
} // RunJulia4D0
// check if we're running in emulation mode
int inEmulationMode()
{
#ifdef __DEVICE_EMULATION__
return 1;
#else
return 0;
#endif
} | 85802c7cd69631a8da0f74cb0c0879409f59e83a.cu | #include <stdio.h>
#include "cutil_inline.h"
#include "kanulia.h"
#include "kanuliacalc.cu"
// Quaternion rotation
__device__ inline void rotate4(float *px, float *py, float *pz, float *pw, const float4 angle)
{
float t;
if (angle.x != 0. ) {
t = *py * cos(angle.x) + *pz * sin(angle.x);
*pz = - *py * sin(angle.x) + *pz * cos(angle.x);
*py = t;
};
if (angle.y != 0. ) {
t = *px * cos(angle.y) + *pz * sin(angle.y);
*pz = - *px * sin(angle.y) + *pz * cos(angle.y);
*px = t;
};
if (angle.z != 0. ) {
t = *pz * cos(angle.z) + *pw * sin(angle.z);
*pw = - *pz * sin(angle.z) + *pw * cos(angle.z);
*pz = t;
};
if (angle.w != 0. ) {
t = *py * cos(angle.w) + *pw * sin(angle.w);
*pw = - *py * sin(angle.w) + *pw * cos(angle.w);
*py = t;
};
}
__device__ inline void rotate4inv(float *px, float *py, float *pz, float *pw, const float4 angle)
{
float t;
if (angle.w != 0. ) {
t = *py * cos(-angle.w) + *pw * sin(-angle.w);
*pw = - *py * sin(-angle.w) + *pw * cos(-angle.w);
*py = t;
};
if (angle.z != 0. ) {
t = *pz * cos(-angle.z) + *pw * sin(-angle.z);
*pw = - *pz * sin(-angle.z) + *pw * cos(-angle.z);
*pz = t;
};
if (angle.y != 0. ) {
t = *px * cos(-angle.y) + *pz * sin(-angle.y);
*pz = - *px * sin(-angle.y) + *pz * cos(-angle.y);
*px = t;
};
if (angle.x != 0. ) {
t = *py * cos(-angle.x) + *pz * sin(-angle.x);
*pz = - *py * sin(-angle.x) + *pz * cos(-angle.x);
*py = t;
};
}
__device__ inline void rotate3(float *px, float *py, float *pz, const float4 angle)
{
float t;
if (angle.x != 0. ) {
t = *py * cos(angle.x) + *pz * sin(angle.x);
*pz = - *py * sin(angle.x) + *pz * cos(angle.x);
*py = t;
};
if (angle.y != 0. ) {
t = *px * cos(angle.y) + *pz * sin(angle.y);
*pz = - *px * sin(angle.y) + *pz * cos(angle.y);
*px = t;
};
if (angle.z != 0. ) {
t = *px * cos(angle.z) - *py * sin(angle.z);
*py = *px * sin(angle.z) + *py * cos(angle.z);
*px = t;
};
/* if (angle.w != 0. ) {
t = *py * cos(angle.w) + *pw * sin(angle.w);
*pw = - *py * sin(angle.w) + *pw * cos(angle.w);
*py = t;
};*/
}
// The Julia4D CUDA GPU thread function
/*
Version using software scheduling of thread blocks.
The idea here is to launch of fixed number of worker blocks to fill the
machine, and have each block loop over the available work until it is all done.
We use a counter in global memory to keep track of which blocks have been
completed. The counter is incremented atomically by each worker block.
This method can achieve higher performance when blocks take a wide range of
different times to complete.
*/
__device__ unsigned int blockCounter; // global counter, initialized to zero before kernel launch
__global__ void Julia4Drepart(uchar4 *dst, const int imageW, const int imageH,
const float4 Off, const float4 JS, const float4 angle, const float scale, const float scalei,
const float xJOff, const float yJOff, const float scaleJ,
const float xblur, const float yblur,
const unsigned int maxgropix,
const unsigned int gropix, const unsigned int bloc, const unsigned int crn,
const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const int julia, const int julia4D)
{
__shared__ unsigned int blockIndex;
__shared__ unsigned int blockX, blockY;
// loop until all blocks completed
while(1) {
if ((threadIdx.x==0) && (threadIdx.y==0)) {
// get block to process
blockIndex = atomicAdd(&blockCounter, 1);
//blockIndex++;
blockX = blockIndex % gridWidth; // note: this is slow, but only called once per block here
blockY = blockIndex / gridWidth;
}
#ifndef __DEVICE_EMULATION__ // device emu doesn't like syncthreads inside while()
__syncthreads();
#endif
// if (blockIndex >= ((numBlocks/nbloc)+1)*(bloc+1)) break; // finish
if (blockIndex >= numBlocks) break; // finish
// process this block
const int ix = blockDim.x * blockX * maxgropix + threadIdx.x * maxgropix + ((bloc * gropix) % maxgropix);
const int iy = blockDim.y * blockY * maxgropix + threadIdx.y * maxgropix + ((bloc * gropix) / maxgropix) * gropix;
int r = 0;int g = 0;int b = 0;
bool seedre = false;bool seedim = false;
if ((ix < imageW) && (iy < imageH)) {
int m = 0;
if ( (julia<32) && (ix < imageW / julia) && (iy < imageH / julia)) {
// Calculate the location
const float xPos = (float)ix * scale * julia + Off.x;
const float yPos = (float)iy * scale * julia + Off.y;
// Calculate the Mandelbrot index for the current location
if (abs(JS.x-xPos)+abs(JS.y-yPos) < 2.1 * scale * julia )
{
seedre = true;
}
if (!seedre)
{
float hue;
// m = CalcMandelbrot(xPos , yPos);
m = CalcMandel4Dcore(xPos, yPos, JS.z, JS.w, &hue);
if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b);
}
} else if (julia4D&& (julia<32) &&((imageW - ix < imageW / julia) && (iy < imageH / julia))) {
// Calculate the location
const float zPos = (float)(imageW - ix) * scalei * julia + Off.z;
const float wPos = (float)iy * scalei * julia + Off.w;
// Calculate the Mandelbrot index for the current location
if (abs(JS.z-zPos)+abs(JS.w-wPos) < 2.1 * scalei * julia )
{
seedim = true;
}
if (!seedim)
{
float hue;
// m = CalcMandelbrot(zPos , wPos);
m = CalcMandel4Dcore(JS.x, JS.y, zPos, wPos, &hue);
if (m<=256) HSL2RGB(hue, 0.6, 0.5, &r, &g, &b);
}
} else {
// Calculate the location
const float xPos = (float)ix * scaleJ + xJOff;
const float yPos = (float)iy * scaleJ + yJOff;
/* const float zPos = (float)0.;
const float wPos = (float)0.;*/
// Calculate the Mandelbrot index for the current location
if (julia4D == JULIA2D)
{
m = CalcJulia(xPos, yPos, JS, crn);
}
if (julia4D == CLOUDJULIA)
{
float dist = 6.0;
float step = 0.009;
float ox = (float)ix * scaleJ + xJOff;
float oy = (float)iy * scaleJ + yJOff;
float oz = - 3.0;
float ow = 0.0;
float dx = sin( 0.7 * step * ( (float)ix + xblur - (imageW/2.)) / ((float) imageW) );
float dy = sin( 0.7 * step * ( (float)iy + yblur - (imageH/2.)) / ((float) imageW) );
float dz = step;
float dw = 0.;
rotate4(&ox,&oy,&oz,&ow,angle);
rotate4(&dx,&dy,&dz,&dw,angle);
int nb = (dist/step);
m = CloudJulia4D(ox,oy,oz,ow,JS,dx,dy,dz,dw,&r,&g,&b,nb,crn);
}
if (julia4D & JULIA4D)
{
/* if ((julia4D & CROSSEYE)&&
( (sqrt( (float)((ix- imageW/4)*(ix- imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.) // if inside the crosshair marker
||(sqrt( (float)((ix-3*imageW/4)*(ix-3*imageW/4) + (iy-(imageH)/5)*(iy-(imageH)/5) )) < 20.)))
{
r = 255;
g = 255;
b = 255;
}
else*/
m = SolidJulia4D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn,julia4D);
// m = SolidMandelBox3D(ix-1,iy-1,JS,angle,imageW,imageH,scaleJ,xblur,yblur,&r,&g,&b,xJOff,yJOff,crn);
}
}
// m = blockIdx.x; // uncomment to see scheduling order
// Convert the Mandelbrot index into a color
uchar4 color;
// m = m > 0 ? crn - m : 0;
if ((julia4D)&&((ix >= imageW / julia) || (iy >= imageH / julia))) {
color.x = r;
color.y = g;
color.z = b;
} else
{
if (seedim||seedre)
{
color.x = 150;
color.y = 250;
color.z = 250;
} else {
color.x = r;
color.y = g;
color.z = b;
/* if (m) {
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
} else {
color.x = 0;
color.y = 0;
color.z = 0;
}*/
}
}
// enable to visualize the progressive (coarse-to-fine) computation
// if (gropix==1) color.z += 120;
// if (gropix==2) color.y += 120;
// if (gropix==4) color.x += 120;
//
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0) {
color.w = 0;
if (gropix==1)
dst[pixel] = color;
else
for (int i=0;i<gropix;i++) for (int j=0;j<gropix;j++)
if ((ix+i<imageW)&&(iy+j<imageH))
dst[pixel+i+imageW*j] = color;
} else {
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Julia4D0
// The host CPU Mandelbrot thread spawner
void RunJulia4Drepart(uchar4 *dst, const int imageW, const int imageH,
const float4 Off,
const float4 JS,
const float4 angle,
const double scale, const double scalei,
const double xJOff, const double yJOff, const double scaleJ,
const float xblur, const float yblur,
const unsigned int maxgropix,
const unsigned int gropix, const unsigned int bloc, const unsigned int crn,
const uchar4 colors, const int frame, const int animationFrame, const int numSMs, const int julia, const int julia4D)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW/maxgropix, BLOCKDIM_X), iDivUp(imageH/(maxgropix), BLOCKDIM_Y));
// zero block counter
// unsigned int hBlockCounter = (((grid.x)*(grid.y)/nbloc)+1)*(bloc);
unsigned int hBlockCounter = 0;
/*cutilSafeCall( */cudaMemcpyToSymbol(blockCounter, &hBlockCounter, sizeof(unsigned int), 0, cudaMemcpyHostToDevice /*)*/ );
int numWorkUnit = numSMs;
Julia4Drepart<<<numWorkUnit, threads>>>(dst, imageW, imageH,
Off, JS, angle, (float)scale, (float)scalei,
(float)xJOff, (float)yJOff, (float)scaleJ,
xblur, yblur,
maxgropix, gropix, bloc, crn,
colors, frame, animationFrame, grid.x, (grid.x)*(grid.y), julia, julia4D);
// cutilCheckMsg("Julia4D0_sm13 kernel execution failed.\n");
} // RunJulia4D0
// check if we're running in emulation mode
int inEmulationMode()
{
#ifdef __DEVICE_EMULATION__
return 1;
#else
return 0;
#endif
} |
f2d5e75d0e6a03a7372d0c38471d34b629af137e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <string>
#include <map>
#include <set>
#include <vector>
/* every tool needs to include this once */
#include "nvbit_tool.h"
/* nvbit interface file */
#include "nvbit.h"
/* for channel */
#include "utils/channel.hpp"
/* Channel used to communicate from GPU to CPU receiving thread */
#define CHANNEL_SIZE (1l << 20)
static __managed__ ChannelDev channel_dev;
static ChannelHost channel_host;
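/* Typical use of the channel, as seen further below: device code fills a struct and calls
 * channel_dev.push(&rec, sizeof(rec)); the host receiver thread drains it with
 * channel_host.recv(buf, CHANNEL_SIZE) and walks the returned bytes in sizeof(rec) strides,
 * for example:
 *
 *     uint32_t n = channel_host.recv(recv_buffer, CHANNEL_SIZE);
 *     for (uint32_t off = 0; off < n; off += sizeof(instruction_t)) {
 *         instruction_t *rec = (instruction_t *)&recv_buffer[off];
 *         ...
 *     }
 */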
/* receiving thread and its control variables */
pthread_t recv_thread;
volatile bool recv_thread_started = false;
volatile bool recv_thread_receiving = false;
/* skip flag used to avoid re-entry on the nvbit_callback when issuing
* flush_channel kernel call */
bool skip_flag = false;
int cache_line_size = 128;
int exclude_pred_off = 0;
int count_warp_level = 1;
/* global control variables for this tool */
uint32_t instr_begin_interval = 0;
uint32_t instr_end_interval = UINT32_MAX;
int verbose = 0;
__managed__ int kernel_id=0;
__managed__ int rep_warp[1000][1000]={-1};
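/* note: the aggregate initializer above only sets rep_warp[0][0] to -1; all remaining entries
 * are zero-initialized, so the -1 end-of-list sentinel checked in dep_instrs() is only present
 * in row 0 until the host fills the table from ./bb_trace/rep_warp.txt. */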
/* opcode to id map and reverse map */
std::map<std::string, int> opcode_to_id_map;
std::map<int, std::string> id_to_opcode_map;
__managed__ uint64_t counter = 0;
/* information collected in the instrumentation function */
typedef struct {
int cta_id_x;
int cta_id_y;
int cta_id_z;
int warp_id;
int opcode_id;
int offset;
int RoW;
int sm_id;
uint64_t addrs[32];
} mem_access_t;
typedef struct
{ int warp_id;
int opcode_id;
int offset;
int des_reg;
int source_reg_1;
int source_reg_2;
int source_reg_3;
}instruction_t;
/* instrumentation function that we want to inject, please note the use of
* 1. "extern "C" __device__ __noinline__" to prevent code elimination by the
* compiler.
* 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function
* we want to inject. This name must match exactly the function name */
extern "C" __device__ __noinline__ void dep_instrs(int predicate,
int count_warp_level,int offset,int opcode_id, int des_reg=10000,int reg_2=10000,int reg_3=10000,int reg_4=10000) {
/* all the active threads will compute the active mask */
const int active_mask = __ballot(1);
/* compute the predicate mask */
const int predicate_mask = __ballot(predicate);
/* each thread will get a lane id (get_lane_id is in utils/utils.h) */
const int laneid = get_laneid();
/* get the id of the first active thread */
const int first_laneid = __ffs(active_mask) - 1;
/* count all the active thread */
const int num_threads = __popc(predicate_mask);
const int warp_id =get_global_warp_id();
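    /* Warp-level election pattern: __ballot(1) gives the mask of active lanes, __ffs picks the
       lowest-numbered active lane, and only that lane performs the atomicAdd and the channel
       push below, so each dynamic instruction is counted once per warp. */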
instruction_t ta;
ta.warp_id = warp_id;
int push=0;
    for(int i=0;i<1000;i++)
    {
        if(rep_warp[kernel_id][i]==-1)
            break;
        if(warp_id==rep_warp[kernel_id][i])
        {
            push=1;
            break;
        }
    }
if(push)
{
ta.offset = offset;
ta.opcode_id = opcode_id;
ta.des_reg = des_reg;
if(reg_2<1000)
ta.source_reg_1=reg_2;
if(reg_3<1000)
ta.source_reg_2=reg_3;
if(reg_4<1000)
ta.source_reg_3=reg_4;
}
/* only the first active thread will perform the atomic */
//inst->print();
if (first_laneid == laneid)
{
if (count_warp_level)
{
/* num threads can be zero when accounting for predicates off */
if (num_threads > 0)
{atomicAdd((unsigned long long *)&counter, 1);
if(push)
channel_dev.push(&ta, sizeof(instruction_t));
}
else
{
atomicAdd((unsigned long long *)&counter, num_threads);
}
}
}
}
NVBIT_EXPORT_FUNC(dep_instrs);
/* Instrumentation function that we want to inject, please note the use of
* 1. extern "C" __device__ __noinline__
* To prevent "dead"-code elimination by the compiler.
* 2. NVBIT_EXPORT_FUNC(dev_func)
* To notify nvbit the name of the function we want to inject.
* This name must match exactly the function name.
*/
extern "C" __device__ __noinline__ void instrument_mem(int pred, int opcode_id,
uint32_t reg_high,
uint32_t reg_low,
int32_t imm,int offset,int RoW) {
if (!pred) {
return;
}
int64_t base_addr = (((uint64_t)reg_high) << 32) | ((uint64_t)reg_low);
uint64_t addr = base_addr + imm;
int active_mask = __ballot(1);
const int laneid = get_laneid();
const int first_laneid = __ffs(active_mask) - 1;
mem_access_t ma;
/* collect memory address information */
for (int i = 0; i < 32; i++) {
ma.addrs[i] = __shfl(addr, i);
}
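    /* __shfl broadcasts every lane's computed address across the warp, so after the loop above
       the first active lane holds all 32 per-lane addresses and can push the record alone. */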
int4 cta = get_ctaid();
ma.cta_id_x = cta.x;
ma.cta_id_y = cta.y;
ma.cta_id_z = cta.z;
ma.warp_id = get_global_warp_id();
ma.sm_id = get_smid();
ma.opcode_id = opcode_id;
ma.offset = offset;
ma.RoW =RoW;
/* first active lane pushes information on the channel */
if (first_laneid == laneid) {
channel_dev.push(&ma, sizeof(mem_access_t));
}
}
NVBIT_EXPORT_FUNC(instrument_mem);
void nvbit_at_init() {
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
GET_VAR_INT(
instr_begin_interval, "INSTR_BEGIN", 0,
"Beginning of the instruction interval where to apply instrumentation");
GET_VAR_INT(
instr_end_interval, "INSTR_END", UINT32_MAX,
"End of the instruction interval where to apply instrumentation");
GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool");
std::string pad(100, '-');
printf("%s\n", pad.c_str());
}
/* instrument each memory instruction adding a call to the above instrumentation
* function */
void nvbit_at_function_first_load(hipCtx_t ctx, hipFunction_t f) {
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
if (verbose) {
printf("Inspecting function %s at address 0x%lx\n",
nvbit_get_func_name(ctx, f), nvbit_get_func_addr(f));
}
uint32_t cnt = 0;
//read file, select rep_warps in each path
    FILE * rep_warp_info = fopen("./bb_trace/rep_warp.txt","r");
    if(rep_warp_info!=NULL)
    {
        int k;
        int warp_id;
        int i=0;
        while(fscanf(rep_warp_info,"%d,%d",&k,&warp_id)!=EOF)
        {
            rep_warp[k][i]=warp_id;
            i++;
        }
        fclose(rep_warp_info);  // close only when the open succeeded (fclose(NULL) is undefined)
    }
/* iterate on all the static instructions in the function */
for (auto instr : instrs) {
if (cnt < instr_begin_interval || cnt >= instr_end_interval)
break;
if (verbose) {
instr->printDecoded();
}
if (opcode_to_id_map.find(instr->getOpcode()) ==
opcode_to_id_map.end()) {
int opcode_id = opcode_to_id_map.size();
opcode_to_id_map[instr->getOpcode()] = opcode_id;
printf("OPCODE %s MAPS TO ID %d\n",instr->getOpcode(),opcode_id);
id_to_opcode_map[opcode_id] = std::string(instr->getOpcode());
}
instr->print();
int opcode_id = opcode_to_id_map[instr->getOpcode()];
int offset = instr->getOffset();
/* instrument for instruction trace */
{
nvbit_insert_call(instr, "dep_instrs", IPOINT_BEFORE);
if (exclude_pred_off) {
/* pass predicate value */
nvbit_add_call_arg_pred_val(instr);
} else {
/* pass always true */
nvbit_add_call_arg_const_val32(instr, 1);
}
/* add count warps option */
nvbit_add_call_arg_const_val32(instr, count_warp_level);
/* add instruction pc */
nvbit_add_call_arg_const_val32(instr,offset);
/* add opcode */
nvbit_add_call_arg_const_val32(instr,opcode_id);
//nvbit_add_call_arg_const_val64(instr, uint64_t(&rep_warp));
//if(!i->isStore())
// {
for (int j=0;j<instr->getNumOperands();j++)
{const Instr::operand_t * op=instr->getOperand(j);/*get each operand*/
//if((op->type==Instr::REG))
nvbit_add_call_arg_const_val32(instr,op->value[0]);/* get register_id*/
//else
// {
// if(j==0)
// nvbit_add_call_arg_const_val32(instr,10000);
// }
}
}
/* instrument for memory trace */
cnt++;
}
}
__global__ void flush_channel() {
/* push memory access with negative cta id to communicate the kernel is
* completed */
instruction_t ta;
ta.warp_id=-1;
channel_dev.push(&ta, sizeof(instruction_t));
/* flush channel */
channel_dev.flush();
}
/*
__global__ void flush_channel() {
//push memory access with negative cta id to communicate the kernel is completed
}
*/
void nvbit_at_cuda_event(hipCtx_t ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, hipError_t *pStatus) {
if (skip_flag) return;
if (cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
if (!is_exit) {
int nregs;
CUDA_SAFECALL(
hipFuncGetAttribute(&nregs, hipFuncAttributeNumRegs, p->f));
int shmem_static_nbytes;
CUDA_SAFECALL(hipFuncGetAttribute(&shmem_static_nbytes,
hipFuncAttributeSharedSizeBytes,
p->f));
printf(
"Kernel %s - grid size %d,%d,%d - block size %d,%d,%d - nregs "
"%d - shmem %d - cuda stream id %ld\n",
nvbit_get_func_name(ctx, p->f), p->gridDimX, p->gridDimY,
p->gridDimZ, p->blockDimX, p->blockDimY, p->blockDimZ, nregs,
shmem_static_nbytes + p->sharedMemBytes, (uint64_t)p->hStream);
recv_thread_receiving = true;
} else {
kernel_id++;
/* make sure current kernel is completed */
hipDeviceSynchronize();
assert(hipGetLastError() == hipSuccess);
/* make sure we prevent re-entry on the nvbit_callback when issuing
* the flush_channel kernel */
skip_flag = true;
/* issue flush of channel so we are sure all the memory accesses
* have been pushed */
hipLaunchKernelGGL(( flush_channel), dim3(1), dim3(1), 0, 0, );
hipDeviceSynchronize();
assert(hipGetLastError() == hipSuccess);
/* unset the skip flag */
skip_flag = false;
/* wait here until the receiving thread has not finished with the
* current kernel */
while (recv_thread_receiving) {
pthread_yield();
}
}
}
}
void *recv_thread_fun(void *) {
char *recv_buffer = (char *)malloc(CHANNEL_SIZE);
// std::map<int, std::vector<mem_access_t *>> per_warp_mem_trace;
while (recv_thread_started) {
uint32_t num_recv_bytes = 0;
if (recv_thread_receiving &&
(num_recv_bytes = channel_host.recv(recv_buffer, CHANNEL_SIZE)) >
0) {
uint32_t num_processed_bytes = 0;
while (num_processed_bytes < num_recv_bytes) {
instruction_t *ta =
(instruction_t *)&recv_buffer[num_processed_bytes];
/* when we get this cta_id_x it means the kernel has completed
*/
if (ta->warp_id == -1) {
recv_thread_receiving = false;
break;
}
int warp_id =ta->warp_id;
//per_warp_mem_trace[warp_id].push_back(ma);
char fn[100];
snprintf(fn,sizeof(fn),"./instruction_trace_rep_warp_%d.txt",kernel_id);
                FILE * f = fopen(fn,"a");
                if(f!=NULL)
                {
                    fprintf(f,"%d,%d,%d,%d,%d,%d,%d\n",ta->warp_id,ta->offset,ta->opcode_id,ta->des_reg,ta->source_reg_1,ta->source_reg_2,ta->source_reg_3);
                    fclose(f);  // close only when the open succeeded
                }
num_processed_bytes += sizeof(instruction_t);
}
}
}
free(recv_buffer);
return NULL;
}
void nvbit_at_ctx_init(hipCtx_t ctx) {
recv_thread_started = true;
channel_host.init(0, CHANNEL_SIZE, &channel_dev, NULL);
pthread_create(&recv_thread, NULL, recv_thread_fun, NULL);
}
void nvbit_at_ctx_term(hipCtx_t ctx) {
if (recv_thread_started) {
recv_thread_started = false;
pthread_join(recv_thread, NULL);
}
}
| f2d5e75d0e6a03a7372d0c38471d34b629af137e.cu | /* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <string>
#include <map>
#include <set>
#include <vector>
/* every tool needs to include this once */
#include "nvbit_tool.h"
/* nvbit interface file */
#include "nvbit.h"
/* for channel */
#include "utils/channel.hpp"
/* Channel used to communicate from GPU to CPU receiving thread */
#define CHANNEL_SIZE (1l << 20)
static __managed__ ChannelDev channel_dev;
static ChannelHost channel_host;
/* receiving thread and its control variables */
pthread_t recv_thread;
volatile bool recv_thread_started = false;
volatile bool recv_thread_receiving = false;
/* skip flag used to avoid re-entry on the nvbit_callback when issuing
* flush_channel kernel call */
bool skip_flag = false;
int cache_line_size = 128;
int exclude_pred_off = 0;
int count_warp_level = 1;
/* global control variables for this tool */
uint32_t instr_begin_interval = 0;
uint32_t instr_end_interval = UINT32_MAX;
int verbose = 0;
__managed__ int kernel_id=0;
__managed__ int rep_warp[1000][1000]={-1};
/* opcode to id map and reverse map */
std::map<std::string, int> opcode_to_id_map;
std::map<int, std::string> id_to_opcode_map;
__managed__ uint64_t counter = 0;
/* information collected in the instrumentation function */
typedef struct {
int cta_id_x;
int cta_id_y;
int cta_id_z;
int warp_id;
int opcode_id;
int offset;
int RoW;
int sm_id;
uint64_t addrs[32];
} mem_access_t;
typedef struct
{ int warp_id;
int opcode_id;
int offset;
int des_reg;
int source_reg_1;
int source_reg_2;
int source_reg_3;
}instruction_t;
/* instrumentation function that we want to inject, please note the use of
* 1. "extern "C" __device__ __noinline__" to prevent code elimination by the
* compiler.
* 2. NVBIT_EXPORT_FUNC(count_instrs) to notify nvbit the name of the function
* we want to inject. This name must match exactly the function name */
extern "C" __device__ __noinline__ void dep_instrs(int predicate,
int count_warp_level,int offset,int opcode_id, int des_reg=10000,int reg_2=10000,int reg_3=10000,int reg_4=10000) {
/* all the active threads will compute the active mask */
const int active_mask = __ballot(1);
/* compute the predicate mask */
const int predicate_mask = __ballot(predicate);
/* each thread will get a lane id (get_lane_id is in utils/utils.h) */
const int laneid = get_laneid();
/* get the id of the first active thread */
const int first_laneid = __ffs(active_mask) - 1;
/* count all the active thread */
const int num_threads = __popc(predicate_mask);
const int warp_id =get_global_warp_id();
instruction_t ta;
ta.warp_id = warp_id;
int push=0;
    for(int i=0;i<1000;i++)
    {
        if(rep_warp[kernel_id][i]==-1)
            break;
        if(warp_id==rep_warp[kernel_id][i])
        {
            push=1;
            break;
        }
    }
if(push)
{
ta.offset = offset;
ta.opcode_id = opcode_id;
ta.des_reg = des_reg;
if(reg_2<1000)
ta.source_reg_1=reg_2;
if(reg_3<1000)
ta.source_reg_2=reg_3;
if(reg_4<1000)
ta.source_reg_3=reg_4;
}
/* only the first active thread will perform the atomic */
//inst->print();
if (first_laneid == laneid)
{
if (count_warp_level)
{
/* num threads can be zero when accounting for predicates off */
if (num_threads > 0)
{atomicAdd((unsigned long long *)&counter, 1);
if(push)
channel_dev.push(&ta, sizeof(instruction_t));
}
else
{
atomicAdd((unsigned long long *)&counter, num_threads);
}
}
}
}
NVBIT_EXPORT_FUNC(dep_instrs);
/* Instrumentation function that we want to inject, please note the use of
* 1. extern "C" __device__ __noinline__
* To prevent "dead"-code elimination by the compiler.
 * 2. NVBIT_EXPORT_FUNC(instrument_mem)
 * To notify nvbit of the name of the function we want to inject.
 * This name must match the function name exactly.
*/
extern "C" __device__ __noinline__ void instrument_mem(int pred, int opcode_id,
uint32_t reg_high,
uint32_t reg_low,
int32_t imm,int offset,int RoW) {
if (!pred) {
return;
}
int64_t base_addr = (((uint64_t)reg_high) << 32) | ((uint64_t)reg_low);
uint64_t addr = base_addr + imm;
int active_mask = __ballot(1);
const int laneid = get_laneid();
const int first_laneid = __ffs(active_mask) - 1;
mem_access_t ma;
/* collect memory address information */
for (int i = 0; i < 32; i++) {
ma.addrs[i] = __shfl(addr, i);
}
int4 cta = get_ctaid();
ma.cta_id_x = cta.x;
ma.cta_id_y = cta.y;
ma.cta_id_z = cta.z;
ma.warp_id = get_global_warp_id();
ma.sm_id = get_smid();
ma.opcode_id = opcode_id;
ma.offset = offset;
ma.RoW =RoW;
/* first active lane pushes information on the channel */
if (first_laneid == laneid) {
channel_dev.push(&ma, sizeof(mem_access_t));
}
}
NVBIT_EXPORT_FUNC(instrument_mem);
void nvbit_at_init() {
setenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC", "1", 1);
GET_VAR_INT(
instr_begin_interval, "INSTR_BEGIN", 0,
"Beginning of the instruction interval where to apply instrumentation");
GET_VAR_INT(
instr_end_interval, "INSTR_END", UINT32_MAX,
"End of the instruction interval where to apply instrumentation");
GET_VAR_INT(verbose, "TOOL_VERBOSE", 0, "Enable verbosity inside the tool");
std::string pad(100, '-');
printf("%s\n", pad.c_str());
}
/* instrument every instruction in the function, adding a call to the
 * instrumentation function above */
void nvbit_at_function_first_load(CUcontext ctx, CUfunction f) {
const std::vector<Instr *> &instrs = nvbit_get_instrs(ctx, f);
if (verbose) {
printf("Inspecting function %s at address 0x%lx\n",
nvbit_get_func_name(ctx, f), nvbit_get_func_addr(f));
}
uint32_t cnt = 0;
//read file, select rep_warps in each path
    FILE * rep_warp_info = fopen("./bb_trace/rep_warp.txt","r");
    if (rep_warp_info != NULL) {
        int k;
        int warp_id;
        int i = 0;
        while (fscanf(rep_warp_info, "%d,%d", &k, &warp_id) != EOF) {
            rep_warp[k][i] = warp_id;
            i++;
        }
        /* only close the file if it was actually opened */
        fclose(rep_warp_info);
    }
/* iterate on all the static instructions in the function */
for (auto instr : instrs) {
if (cnt < instr_begin_interval || cnt >= instr_end_interval)
break;
if (verbose) {
instr->printDecoded();
}
if (opcode_to_id_map.find(instr->getOpcode()) ==
opcode_to_id_map.end()) {
int opcode_id = opcode_to_id_map.size();
opcode_to_id_map[instr->getOpcode()] = opcode_id;
printf("OPCODE %s MAPS TO ID %d\n",instr->getOpcode(),opcode_id);
id_to_opcode_map[opcode_id] = std::string(instr->getOpcode());
}
instr->print();
int opcode_id = opcode_to_id_map[instr->getOpcode()];
int offset = instr->getOffset();
/* instrument for instruction trace */
{
nvbit_insert_call(instr, "dep_instrs", IPOINT_BEFORE);
if (exclude_pred_off) {
/* pass predicate value */
nvbit_add_call_arg_pred_val(instr);
} else {
/* pass always true */
nvbit_add_call_arg_const_val32(instr, 1);
}
/* add count warps option */
nvbit_add_call_arg_const_val32(instr, count_warp_level);
/* add instruction pc */
nvbit_add_call_arg_const_val32(instr,offset);
/* add opcode */
nvbit_add_call_arg_const_val32(instr,opcode_id);
//nvbit_add_call_arg_const_val64(instr, uint64_t(&rep_warp));
//if(!i->isStore())
// {
for (int j=0;j<instr->getNumOperands();j++)
{const Instr::operand_t * op=instr->getOperand(j);/*get each operand*/
//if((op->type==Instr::REG))
nvbit_add_call_arg_const_val32(instr,op->value[0]);/* get register_id*/
//else
// {
// if(j==0)
// nvbit_add_call_arg_const_val32(instr,10000);
// }
}
}
/* instrument for memory trace */
cnt++;
}
}
__global__ void flush_channel() {
    /* push an instruction record with a negative warp id to communicate that
     * the kernel is completed */
instruction_t ta;
ta.warp_id=-1;
channel_dev.push(&ta, sizeof(instruction_t));
/* flush channel */
channel_dev.flush();
}
/*
__global__ void flush_channel() {
//push memory access with negative cta id to communicate the kernel is completed
}
*/
void nvbit_at_cuda_event(CUcontext ctx, int is_exit, nvbit_api_cuda_t cbid,
const char *name, void *params, CUresult *pStatus) {
if (skip_flag) return;
if (cbid == API_CUDA_cuLaunchKernel_ptsz ||
cbid == API_CUDA_cuLaunchKernel) {
cuLaunchKernel_params *p = (cuLaunchKernel_params *)params;
if (!is_exit) {
int nregs;
CUDA_SAFECALL(
cuFuncGetAttribute(&nregs, CU_FUNC_ATTRIBUTE_NUM_REGS, p->f));
int shmem_static_nbytes;
CUDA_SAFECALL(cuFuncGetAttribute(&shmem_static_nbytes,
CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES,
p->f));
printf(
"Kernel %s - grid size %d,%d,%d - block size %d,%d,%d - nregs "
"%d - shmem %d - cuda stream id %ld\n",
nvbit_get_func_name(ctx, p->f), p->gridDimX, p->gridDimY,
p->gridDimZ, p->blockDimX, p->blockDimY, p->blockDimZ, nregs,
shmem_static_nbytes + p->sharedMemBytes, (uint64_t)p->hStream);
recv_thread_receiving = true;
} else {
kernel_id++;
/* make sure current kernel is completed */
cudaDeviceSynchronize();
assert(cudaGetLastError() == cudaSuccess);
/* make sure we prevent re-entry on the nvbit_callback when issuing
* the flush_channel kernel */
skip_flag = true;
/* issue flush of channel so we are sure all the memory accesses
* have been pushed */
flush_channel<<<1, 1>>>();
cudaDeviceSynchronize();
assert(cudaGetLastError() == cudaSuccess);
/* unset the skip flag */
skip_flag = false;
            /* wait here until the receiving thread has finished with the
             * current kernel */
while (recv_thread_receiving) {
pthread_yield();
}
}
}
}
void *recv_thread_fun(void *) {
char *recv_buffer = (char *)malloc(CHANNEL_SIZE);
// std::map<int, std::vector<mem_access_t *>> per_warp_mem_trace;
while (recv_thread_started) {
uint32_t num_recv_bytes = 0;
if (recv_thread_receiving &&
(num_recv_bytes = channel_host.recv(recv_buffer, CHANNEL_SIZE)) >
0) {
uint32_t num_processed_bytes = 0;
while (num_processed_bytes < num_recv_bytes) {
instruction_t *ta =
(instruction_t *)&recv_buffer[num_processed_bytes];
                /* a warp_id of -1 means the kernel has completed */
if (ta->warp_id == -1) {
recv_thread_receiving = false;
break;
}
int warp_id =ta->warp_id;
//per_warp_mem_trace[warp_id].push_back(ma);
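                /* append one CSV line per traced instruction:
                 * warp_id,offset,opcode_id,des_reg,source_reg_1,source_reg_2,source_reg_3 */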
char fn[100];
snprintf(fn,sizeof(fn),"./instruction_trace_rep_warp_%d.txt",kernel_id);
                FILE * f = fopen(fn, "a");
                if (f != NULL) {
                    fprintf(f, "%d,%d,%d,%d,%d,%d,%d\n", ta->warp_id, ta->offset, ta->opcode_id, ta->des_reg, ta->source_reg_1, ta->source_reg_2, ta->source_reg_3);
                    fclose(f);
                }
num_processed_bytes += sizeof(instruction_t);
}
}
}
free(recv_buffer);
return NULL;
}
void nvbit_at_ctx_init(CUcontext ctx) {
recv_thread_started = true;
channel_host.init(0, CHANNEL_SIZE, &channel_dev, NULL);
pthread_create(&recv_thread, NULL, recv_thread_fun, NULL);
}
void nvbit_at_ctx_term(CUcontext ctx) {
if (recv_thread_started) {
recv_thread_started = false;
pthread_join(recv_thread, NULL);
}
}
|
dbe7a42d77b3fc37e39b63cdceadc83922be927e.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * Column-wise Discrete Sine Transform (DST type III)
* DST_III_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_III_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DSTIII_Column_Kernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
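	/* As is filled with a TILE_DIM x TILE_DIM tile of the orthonormal DST-III basis,
	 * generated on the fly (the DELTA term scales the last basis element by 1/sqrt(2));
	 * Bs holds the matching tile of the input A, so the loop below is a standard
	 * shared-memory tiled matrix product C = T_DSTIII * A. */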
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.x + (k*TILE_DIM)) + 1) / (2.0 * numARows))*PI_d*Row)*sqrtf(1.0 / (1 + DELTA(1, Row + 1)))*sqrtf(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __sinf((((threadIdx.x + k*TILE_DIM) + 1)*PI_d*(Row + 0.5)) / (numARows))*sqrtf(2.0 / (numARows))*sqrtf(1.0 / (1 + DELTA(numARows, (threadIdx.x + k*TILE_DIM) + 1))); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTColumnThreeS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
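	// Devices of compute capability < 2.0 allow at most 512 threads per block,
	// so a 16x16 tile is used there; newer devices use the full 32x32 (1024-thread) tile.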
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| dbe7a42d77b3fc37e39b63cdceadc83922be927e.cu | /*
 * Column-wise Discrete Sine Transform (DST type III)
* DST_III_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_III_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DST_III_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DSTIII_Column_Kernelx(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
//As[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.x + (k*TILE_DIM)) + 1) / (2.0 * numARows))*PI_d*Row)*sqrtf(1.0 / (1 + DELTA(1, Row + 1)))*sqrtf(2.0 / numARows);
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __sinf((((threadIdx.x + k*TILE_DIM) + 1)*PI_d*(Row + 0.5)) / (numARows))*sqrtf(2.0 / (numARows))*sqrtf(1.0 / (1 + DELTA(numARows, (threadIdx.x + k*TILE_DIM) + 1))); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDSTColumnThreeS(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTIII_Column_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
49f1d4410a930293d992ef24fd87c9a6c5960734.hip | // !!! This is a file automatically generated by hipify!!!
/*********************
MIT License
Copyright (c) 2020 Matzoros Christos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***********************/
/*********************
The purpose of this code is to execute multi-GPU matrix multiplication with multiple kernel
invocations using streams. The program splits the computation into 4 individual computations,
as shown below. The proportion between the two block sizes is variable (parameter r).
------------------------------------------------
A * B = C
| A1 | | | | C1 | C2
-------- * | B1 | B2 | = -------
| A2 | | | | C3 | C4
A1 * B1 = C1
A1 * B2 = C2
A2 * B1 = C3
A2 * B2 = C4
These 4 computations may take place simultaneously on 4 different GPUs.
------------------------------------------------
***********************/
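/*
With A of size N x N split at row/column N*r, the block shapes used below are:
  A1: (N*r) x N          A2: (N*(1-r)) x N
  B1: N x (N*r)          B2: N x (N*(1-r))
  C1: (N*r) x (N*r)      C2: (N*r) x (N*(1-r))
  C3: (N*(1-r)) x (N*r)  C4: (N*(1-r)) x (N*(1-r))
Each partial product is assigned to its own stream (and, when available, its own GPU).
*/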
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
// CUDA runtime
//#include <hip/hip_runtime.h>
//Error handling using functions of the CUDA runtime API
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//This macro checks malloc() and hipMalloc() return values
#define Check_Allocation_Return_Value(a){ \
if(a==NULL) { \
printf("Allocation Error\n"); \
hipDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//general kernel(not used)
__global__ void matrix_multiplication(double *A,double *B,double *C,int width){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=width)||((idx>=width))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*width+idx];
}
C[idy*width+idx] = prod_val;
}
// Kernel for the computation of C1 portion
__global__ void kernelC1(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*r))||(idx>=(int)(width*r))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*r)+idx];
}
C[idy*(int)(width*r)+idx] = prod_val;
}
// Kernel for the computation of C2 portion
__global__ void kernelC2(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*r))||(idx>=(int)(width*(1-r)))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*(1-r))+idx];
}
C[idy*(int)(width*(1-r))+idx] = prod_val;
}
// Kernel for the computation of C3 portion
__global__ void kernelC3(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*(1-r)))||(idx>=(int)(width*r))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*r)+idx];
}
C[idy*(int)(width*r)+idx] = prod_val;
}
// Kernel for the computation of C4 portion
__global__ void kernelC4(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*(1-r)))||(idx>=(int)(width*(1-r)))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*(1-r))+idx];
}
C[idy*(int)(width*(1-r))+idx] = prod_val;
}
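// The four kernels are identical apart from the extents of their C block and the
// leading dimension (row pitch) used when indexing B and C for that block.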
int main(int argc,char *argv[]){
const int num_streams = 4;
hipStream_t streams[num_streams];
int N =7000;
double *hA,*hB,*hC;
int id,j,i;
int ndev;
double r = 0.5;
double inv_r = (1-r);
double *hA1,*hA2,*hB1,*hB2,*hC1,*hC2,*hC3,*hC4;
double *dA1,*dA1_2,*dA2,*dA2_2,*dB1,*dB1_2,*dB2,*dB2_2;
double *dC1,*dC2,*dC3,*dC4;
printf("\nNumber of elements of the final matrix: %d\n",N * N);
printf("Block 1 width: %d\n",(int)(N*r));
printf("Block 2 width: %d\n",(int)(N*inv_r));
hipGetDeviceCount(&ndev);
if(ndev==0){
printf("NO GPU DEVICES AVAILABLE\n\n");
exit(-1);
}else{
printf("Number of available GPUs: %d\n\n",ndev);
}
hipHostMalloc(&hA,N*N*sizeof(double));
Check_Allocation_Return_Value(hA)
hipHostMalloc(&hB,N*N*sizeof(double));
Check_Allocation_Return_Value(hB)
hipHostMalloc(&hC,N*N*sizeof(double));
Check_Allocation_Return_Value(hC)
memset(hC,0,N*N*sizeof(double));
srand (time(NULL));
for(i=0;i<N*N;i++){
hA[i] = rand()%10;
hB[i] = rand()%10;
}
//Grid and block size initialization
int grid_width = 1+N/32;
dim3 dimGrid(grid_width,grid_width,1);
dim3 dimBlock(32,32,1);
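	// 32x32 = 1024 threads per block; grid_width = 1 + N/32 blocks per dimension is
	// enough to cover the full N x N output, and the same launch geometry is reused for
	// all four partial products (out-of-range threads simply return in each kernel).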
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// kernel 1
id=0;
hipSetDevice((int)(id%ndev));
//hipStreamCreate(&streams[id]);
hipStreamCreateWithFlags(&streams[id],hipStreamNonBlocking);
hipHostMalloc(&hA1,(int)(N*N*r*sizeof(double)));
Check_Allocation_Return_Value(hA1)
hipHostMalloc(&hB1,(int)(N*N*r*sizeof(double)));
Check_Allocation_Return_Value(hB1)
hipHostMalloc(&hC1,(int)(N*N*r*r*sizeof(double)));
Check_Allocation_Return_Value(hC1)
for(int i=0;i<(int)(N*r);i++){
for(int j=0;j<N;j++){
hA1[i*N+j] = hA[i*N+j];
}
}
for(int i=0;i<N;i++){
for(int j=0;j<(N*r);j++){
hB1[i*(int)(N*r)+j] = hB[i*N+j];
}
}
hipMalloc((void**)&dA1,(int)(N*N*r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dB1,(int)(N*N*r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dC1,(int)(N*N*r*r*sizeof(double)));
cudaCheckError()
// kernel 2
id=1;
hipSetDevice((int)(id%ndev));
//hipStreamCreate(&streams[id]);
hipStreamCreateWithFlags(&streams[id],hipStreamNonBlocking);
hipHostMalloc(&hB2,(int)(N*N*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hB2)
hipHostMalloc(&hC2,(int)(N*N*r*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hC2)
for(int i=0;i<N;i++){
for(int j=0;j<(N*inv_r);j++){
hB2[i*(int)(N*inv_r)+j] = hB[i*N+(int)(N*r)+j];
}
}
hipMalloc((void**)&dA1_2,(int)(N*N*r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dB2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dC2,(int)(N*N*r*inv_r*sizeof(double)));
cudaCheckError()
// kernel 3
id=2;
hipSetDevice(id%ndev);
//hipStreamCreate(&streams[id]);
hipStreamCreateWithFlags(&streams[id],hipStreamNonBlocking);
hipHostMalloc(&hA2,(int)(N*N*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hA2)
hipHostMalloc(&hC3,(int)(N*N*inv_r*r*sizeof(double)));
Check_Allocation_Return_Value(hC3)
for(int i=0;i<(int)(N*inv_r);i++){
for(int j=0;j<N;j++){
hA2[i*N+j] = hA[(i+(int)(N*r))*N+j];
}
}
hipMalloc((void**)&dA2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dB1_2,(int)(N*N*r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dC3,(int)(N*N*r*inv_r*sizeof(double)));
cudaCheckError()
// kernel 4
id=3;
hipSetDevice(id%ndev);
//hipStreamCreate(&streams[id]);
hipStreamCreateWithFlags(&streams[id],hipStreamNonBlocking);
hipHostMalloc(&hC4,(int)(N*N*inv_r*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hC4)
hipMalloc((void**)&dA2_2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dB2_2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
hipMalloc((void**)&dC4,(int)(N*N*inv_r*inv_r*sizeof(double)));
cudaCheckError()
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
printf("CPU-->GPU Memory copy(A1,B1,C1) - hipMemcpyAsync\n");
id=0;
hipSetDevice(id%ndev);
hipMemcpyAsync(dA1,hA1,(int)(N*N*r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
hipMemcpyAsync(dB1,hB1,(int)(N*N*r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 1 Execution...\n");
hipLaunchKernelGGL(( kernelC1) , dim3(dimGrid),dim3(dimBlock),0,streams[id], dA1,dB1,dC1,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=1;
hipSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A1,B2,C2) - hipMemcpyAsync\n");
hipMemcpyAsync(dA1_2,hA1,(int)(N*N*r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
hipMemcpyAsync(dB2,hB2,(int)(N*N*inv_r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 2 Execution...\n");
hipLaunchKernelGGL(( kernelC2) , dim3(dimGrid),dim3(dimBlock),0,streams[id], dA1_2,dB2,dC2,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=2;
hipSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A2,B1,C3) - hipMemcpyAsync\n");
hipMemcpyAsync(dA2,hA2,(int)(N*N*inv_r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
hipMemcpyAsync(dB1_2,hB1,(int)(N*N*r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 3 Execution...\n");
hipLaunchKernelGGL(( kernelC3) , dim3(dimGrid),dim3(dimBlock),0,streams[id], dA2,dB1_2,dC3,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=3;
hipSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A2,B2,C4) - hipMemcpyAsync\n");
hipMemcpyAsync(dA2_2,hA2,(int)(N*N*inv_r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
hipMemcpyAsync(dB2_2,hB2,(int)(N*N*inv_r*sizeof(double)),hipMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 4 Execution...\n");
hipLaunchKernelGGL(( kernelC4) , dim3(dimGrid),dim3(dimBlock),0,streams[id], dA2_2,dB2_2,dC4,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
printf("GPU-->CPU Memory copy (dC1) - hipMemcpyAsync\n");
hipMemcpyAsync(hC1,dC1,(int)(N*N*r*r*sizeof(double)),hipMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC2) - hipMemcpyAsync\n");
hipMemcpyAsync(hC2,dC2,(int)(N*N*r*inv_r*sizeof(double)),hipMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC3) - hipMemcpyAsync\n");
hipMemcpyAsync(hC3,dC3,(int)(N*N*r*inv_r*sizeof(double)),hipMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC4) - hipMemcpyAsync\n");
hipMemcpyAsync(hC4,dC4,(int)(N*N*inv_r*inv_r*sizeof(double)),hipMemcpyDeviceToHost,streams[id]);
cudaCheckError()
//Synchronize in order to process the results of every invocation
id=0;
hipSetDevice(id%ndev);
hipStreamSynchronize(streams[id]);
id=1;
hipSetDevice(id%ndev);
hipStreamSynchronize(streams[id]);
id=2;
hipSetDevice(id%ndev);
hipStreamSynchronize(streams[id]);
id=3;
hipSetDevice(id%ndev);
hipStreamSynchronize(streams[id]);
//create the final Matrix
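	// C1 fills rows [0, N*r) x cols [0, N*r), C2 rows [0, N*r) x cols [N*r, N),
	// C3 rows [N*r, N) x cols [0, N*r), and C4 rows [N*r, N) x cols [N*r, N) of hC.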
for(i=0;i<(int)N*r;i++){
for(j=0;j<(int)N*r;j++){
hC[i*N+j] = hC1[i*(int)(N*r)+j];
//printf("hC[%d]:%f ",i*N+j,hC[i*N+j]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)N*r;i++){
for(j=0;j<(int)(N*inv_r);j++){
hC[i*N+j+(int)(N*r)] = hC2[i*(int)(N*inv_r)+j];
//printf("hC[%d]:%f",i*N+j+(int)(N*r),hC[i*N+j+(int)(N*r)]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)(N*inv_r);i++){
for(j=0;j<(int)(N*r);j++){
hC[(i+(int)(N*r))*N+j] = hC3[i*(int)(N*r)+j];
//printf("hC[%d]:%f",(i+(int)(N*r))*N+j,hC[(i+(int)(N*r))*N+j]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)(N*inv_r);i++){
for(j=0;j<(int)(N*inv_r);j++){
hC[(i+(int)(N*r))*N+j+(int)(N*r)] = hC4[i*(int)(N*inv_r)+j];
// printf("hC[%d]:%f",(i+(int)(N*r))*N+j+(int)(N*r),hC[(i+(int)(N*r))*N+j+(int)(N*r)]);
}
// printf("\n");
}
// printf("\n");
/*
//Compare the GPU result with CPU computation(for validation)
printf("Check results...\n");
int k;
double res;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
res=0;
for(k=0;k<N;k++){
res+=hA[i*N+k]*hB[k*N+j];
}
//printf("%8.3f ",res);
if(res != hC[i*N+j]){
printf("NOT OK i:%d, j:%d\n",i,j);
printf("true value:%f - computed value:%f\n\n",res,hC[i*N+j]);
}
}
//printf("\n");
}
*/
printf("Free Host and Device Memory\n");
hipHostFree(hA);
hipHostFree(hB);
hipHostFree(hC);
hipHostFree(hA1);
hipHostFree(hA2);
hipHostFree(hB1);
hipHostFree(hB2);
hipHostFree(hC1);
hipHostFree(hC2);
hipHostFree(hC3);
hipHostFree(hC4);
id=0;
hipSetDevice(id%ndev);
hipFree(dA1);
cudaCheckError()
hipFree(dB1);
cudaCheckError()
hipFree(dC1);
cudaCheckError()
id=1;
hipSetDevice(id%ndev);
hipFree(dA1_2);
cudaCheckError()
hipFree(dB2);
cudaCheckError()
hipFree(dC2);
cudaCheckError()
id=2;
hipSetDevice(id%ndev);
hipFree(dA2);
cudaCheckError()
hipFree(dB1_2);
cudaCheckError()
hipFree(dC3);
cudaCheckError()
id=3;
hipSetDevice(id%ndev);
hipFree(dA2_2);
cudaCheckError()
hipFree(dB2_2);
cudaCheckError()
hipFree(dC4);
cudaCheckError()
hipStreamDestroy(streams[0]);
hipStreamDestroy(streams[1]);
hipStreamDestroy(streams[2]);
hipStreamDestroy(streams[3]);
return(0);
}
| 49f1d4410a930293d992ef24fd87c9a6c5960734.cu | /*********************
MIT License
Copyright (c) 2020 Matzoros Christos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***********************/
/*********************
The purpose of this code is to execute multi-GPU matrix multiplication with multiple kernel
invocations using streams. The program splits the computation into 4 individual computations,
as shown below. The proportion between the two block sizes is variable (parameter r).
------------------------------------------------
A * B = C
| A1 | | | | C1 | C2
-------- * | B1 | B2 | = -------
| A2 | | | | C3 | C4
A1 * B1 = C1
A1 * B2 = C2
A2 * B1 = C3
A2 * B2 = C4
These 4 computations may take place simultaneously on 4 different GPUs.
------------------------------------------------
***********************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
// CUDA runtime
//#include <cuda_runtime.h>
//Error handling using functions of the CUDA runtime API
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//This macro checks malloc() and cudaMalloc() return values
#define Check_Allocation_Return_Value(a){ \
if(a==NULL) { \
printf("Allocation Error\n"); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//general kernel(not used)
__global__ void matrix_multiplication(double *A,double *B,double *C,int width){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=width)||((idx>=width))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*width+idx];
}
C[idy*width+idx] = prod_val;
}
// Kernel for the computation of C1 portion
__global__ void kernelC1(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*r))||(idx>=(int)(width*r))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*r)+idx];
}
C[idy*(int)(width*r)+idx] = prod_val;
}
// Kernel for the computation of C2 portion
__global__ void kernelC2(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*r))||(idx>=(int)(width*(1-r)))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*(1-r))+idx];
}
C[idy*(int)(width*(1-r))+idx] = prod_val;
}
// Kernel for the computation of C3 portion
__global__ void kernelC3(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*(1-r)))||(idx>=(int)(width*r))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*r)+idx];
}
C[idy*(int)(width*r)+idx] = prod_val;
}
// Kernel for the computation of C4 portion
__global__ void kernelC4(double *A,double *B,double *C,int width, double r){
int idy = blockIdx.y*blockDim.y+threadIdx.y;
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int step;
double prod_val = 0;
if((idy>=(int)(width*(1-r)))||(idx>=(int)(width*(1-r)))) return;
for(step=0;step<width;step++){
prod_val += A[idy*width+step] * B[step*(int)(width*(1-r))+idx];
}
C[idy*(int)(width*(1-r))+idx] = prod_val;
}
int main(int argc,char *argv[]){
const int num_streams = 4;
cudaStream_t streams[num_streams];
int N =7000;
double *hA,*hB,*hC;
int id,j,i;
int ndev;
double r = 0.5;
double inv_r = (1-r);
double *hA1,*hA2,*hB1,*hB2,*hC1,*hC2,*hC3,*hC4;
double *dA1,*dA1_2,*dA2,*dA2_2,*dB1,*dB1_2,*dB2,*dB2_2;
double *dC1,*dC2,*dC3,*dC4;
printf("\nNumber of elements of the final matrix: %d\n",N * N);
printf("Block 1 width: %d\n",(int)(N*r));
printf("Block 2 width: %d\n",(int)(N*inv_r));
cudaGetDeviceCount(&ndev);
if(ndev==0){
printf("NO GPU DEVICES AVAILABLE\n\n");
exit(-1);
}else{
printf("Number of available GPUs: %d\n\n",ndev);
}
cudaMallocHost(&hA,N*N*sizeof(double));
Check_Allocation_Return_Value(hA)
cudaMallocHost(&hB,N*N*sizeof(double));
Check_Allocation_Return_Value(hB)
cudaMallocHost(&hC,N*N*sizeof(double));
Check_Allocation_Return_Value(hC)
memset(hC,0,N*N*sizeof(double));
srand (time(NULL));
for(i=0;i<N*N;i++){
hA[i] = rand()%10;
hB[i] = rand()%10;
}
//Grid and block size initialization
int grid_width = 1+N/32;
dim3 dimGrid(grid_width,grid_width,1);
dim3 dimBlock(32,32,1);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// kernel 1
id=0;
cudaSetDevice((int)(id%ndev));
//cudaStreamCreate(&streams[id]);
cudaStreamCreateWithFlags(&streams[id],cudaStreamNonBlocking);
cudaMallocHost(&hA1,(int)(N*N*r*sizeof(double)));
Check_Allocation_Return_Value(hA1)
cudaMallocHost(&hB1,(int)(N*N*r*sizeof(double)));
Check_Allocation_Return_Value(hB1)
cudaMallocHost(&hC1,(int)(N*N*r*r*sizeof(double)));
Check_Allocation_Return_Value(hC1)
for(int i=0;i<(int)(N*r);i++){
for(int j=0;j<N;j++){
hA1[i*N+j] = hA[i*N+j];
}
}
for(int i=0;i<N;i++){
for(int j=0;j<(N*r);j++){
hB1[i*(int)(N*r)+j] = hB[i*N+j];
}
}
cudaMalloc((void**)&dA1,(int)(N*N*r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dB1,(int)(N*N*r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dC1,(int)(N*N*r*r*sizeof(double)));
cudaCheckError()
// kernel 2
id=1;
cudaSetDevice((int)(id%ndev));
//cudaStreamCreate(&streams[id]);
cudaStreamCreateWithFlags(&streams[id],cudaStreamNonBlocking);
cudaMallocHost(&hB2,(int)(N*N*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hB2)
cudaMallocHost(&hC2,(int)(N*N*r*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hC2)
for(int i=0;i<N;i++){
for(int j=0;j<(N*inv_r);j++){
hB2[i*(int)(N*inv_r)+j] = hB[i*N+(int)(N*r)+j];
}
}
cudaMalloc((void**)&dA1_2,(int)(N*N*r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dB2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dC2,(int)(N*N*r*inv_r*sizeof(double)));
cudaCheckError()
// kernel 3
id=2;
cudaSetDevice(id%ndev);
//cudaStreamCreate(&streams[id]);
cudaStreamCreateWithFlags(&streams[id],cudaStreamNonBlocking);
cudaMallocHost(&hA2,(int)(N*N*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hA2)
cudaMallocHost(&hC3,(int)(N*N*inv_r*r*sizeof(double)));
Check_Allocation_Return_Value(hC3)
for(int i=0;i<(int)(N*inv_r);i++){
for(int j=0;j<N;j++){
hA2[i*N+j] = hA[(i+(int)(N*r))*N+j];
}
}
cudaMalloc((void**)&dA2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dB1_2,(int)(N*N*r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dC3,(int)(N*N*r*inv_r*sizeof(double)));
cudaCheckError()
// kernel 4
id=3;
cudaSetDevice(id%ndev);
//cudaStreamCreate(&streams[id]);
cudaStreamCreateWithFlags(&streams[id],cudaStreamNonBlocking);
cudaMallocHost(&hC4,(int)(N*N*inv_r*inv_r*sizeof(double)));
Check_Allocation_Return_Value(hC4)
cudaMalloc((void**)&dA2_2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dB2_2,(int)(N*N*inv_r*sizeof(double)));
cudaCheckError()
cudaMalloc((void**)&dC4,(int)(N*N*inv_r*inv_r*sizeof(double)));
cudaCheckError()
//////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
printf("CPU-->GPU Memory copy(A1,B1,C1) - cudaMemcpyAsync\n");
id=0;
cudaSetDevice(id%ndev);
cudaMemcpyAsync(dA1,hA1,(int)(N*N*r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
cudaMemcpyAsync(dB1,hB1,(int)(N*N*r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 1 Execution...\n");
kernelC1 <<< dimGrid,dimBlock,0,streams[id]>>>(dA1,dB1,dC1,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=1;
cudaSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A1,B2,C2) - cudaMemcpyAsync\n");
cudaMemcpyAsync(dA1_2,hA1,(int)(N*N*r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
cudaMemcpyAsync(dB2,hB2,(int)(N*N*inv_r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 2 Execution...\n");
kernelC2 <<< dimGrid,dimBlock,0,streams[id]>>>(dA1_2,dB2,dC2,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=2;
cudaSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A2,B1,C3) - cudaMemcpyAsync\n");
cudaMemcpyAsync(dA2,hA2,(int)(N*N*inv_r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
cudaMemcpyAsync(dB1_2,hB1,(int)(N*N*r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 3 Execution...\n");
kernelC3 <<< dimGrid,dimBlock,0,streams[id]>>>(dA2,dB1_2,dC3,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
id=3;
cudaSetDevice(id%ndev);
printf("CPU-->GPU Memory copy(A2,B2,C4) - cudaMemcpyAsync\n");
cudaMemcpyAsync(dA2_2,hA2,(int)(N*N*inv_r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
cudaMemcpyAsync(dB2_2,hB2,(int)(N*N*inv_r*sizeof(double)),cudaMemcpyHostToDevice,streams[id]);
cudaCheckError()
printf("Kernel 4 Execution...\n");
kernelC4 <<< dimGrid,dimBlock,0,streams[id]>>>(dA2_2,dB2_2,dC4,N,r);
cudaCheckError()
///////////////////////////////////////////////////////////////////////////////
printf("GPU-->CPU Memory copy (dC1) - cudaMemcpyAsync\n");
cudaMemcpyAsync(hC1,dC1,(int)(N*N*r*r*sizeof(double)),cudaMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC2) - cudaMemcpyAsync\n");
cudaMemcpyAsync(hC2,dC2,(int)(N*N*r*inv_r*sizeof(double)),cudaMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC3) - cudaMemcpyAsync\n");
cudaMemcpyAsync(hC3,dC3,(int)(N*N*r*inv_r*sizeof(double)),cudaMemcpyDeviceToHost,streams[id]);
cudaCheckError()
printf("GPU-->CPU Memory copy (dC4) - cudaMemcpyAsync\n");
cudaMemcpyAsync(hC4,dC4,(int)(N*N*inv_r*inv_r*sizeof(double)),cudaMemcpyDeviceToHost,streams[id]);
cudaCheckError()
//Synchronize in order to process the results of every invocation
id=0;
cudaSetDevice(id%ndev);
cudaStreamSynchronize(streams[id]);
id=1;
cudaSetDevice(id%ndev);
cudaStreamSynchronize(streams[id]);
id=2;
cudaSetDevice(id%ndev);
cudaStreamSynchronize(streams[id]);
id=3;
cudaSetDevice(id%ndev);
cudaStreamSynchronize(streams[id]);
//create the final Matrix
for(i=0;i<(int)N*r;i++){
for(j=0;j<(int)N*r;j++){
hC[i*N+j] = hC1[i*(int)(N*r)+j];
//printf("hC[%d]:%f ",i*N+j,hC[i*N+j]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)N*r;i++){
for(j=0;j<(int)(N*inv_r);j++){
hC[i*N+j+(int)(N*r)] = hC2[i*(int)(N*inv_r)+j];
//printf("hC[%d]:%f",i*N+j+(int)(N*r),hC[i*N+j+(int)(N*r)]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)(N*inv_r);i++){
for(j=0;j<(int)(N*r);j++){
hC[(i+(int)(N*r))*N+j] = hC3[i*(int)(N*r)+j];
//printf("hC[%d]:%f",(i+(int)(N*r))*N+j,hC[(i+(int)(N*r))*N+j]);
}
//printf("\n");
}
//printf("\n");
for(i=0;i<(int)(N*inv_r);i++){
for(j=0;j<(int)(N*inv_r);j++){
hC[(i+(int)(N*r))*N+j+(int)(N*r)] = hC4[i*(int)(N*inv_r)+j];
// printf("hC[%d]:%f",(i+(int)(N*r))*N+j+(int)(N*r),hC[(i+(int)(N*r))*N+j+(int)(N*r)]);
}
// printf("\n");
}
// printf("\n");
/*
//Compare the GPU result with CPU computation(for validation)
printf("Check results...\n");
int k;
double res;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
res=0;
for(k=0;k<N;k++){
res+=hA[i*N+k]*hB[k*N+j];
}
//printf("%8.3f ",res);
if(res != hC[i*N+j]){
printf("NOT OK i:%d, j:%d\n",i,j);
printf("true value:%f - computed value:%f\n\n",res,hC[i*N+j]);
}
}
//printf("\n");
}
*/
printf("Free Host and Device Memory\n");
cudaFreeHost(hA);
cudaFreeHost(hB);
cudaFreeHost(hC);
cudaFreeHost(hA1);
cudaFreeHost(hA2);
cudaFreeHost(hB1);
cudaFreeHost(hB2);
cudaFreeHost(hC1);
cudaFreeHost(hC2);
cudaFreeHost(hC3);
cudaFreeHost(hC4);
id=0;
cudaSetDevice(id%ndev);
cudaFree(dA1);
cudaCheckError()
cudaFree(dB1);
cudaCheckError()
cudaFree(dC1);
cudaCheckError()
id=1;
cudaSetDevice(id%ndev);
cudaFree(dA1_2);
cudaCheckError()
cudaFree(dB2);
cudaCheckError()
cudaFree(dC2);
cudaCheckError()
id=2;
cudaSetDevice(id%ndev);
cudaFree(dA2);
cudaCheckError()
cudaFree(dB1_2);
cudaCheckError()
cudaFree(dC3);
cudaCheckError()
id=3;
cudaSetDevice(id%ndev);
cudaFree(dA2_2);
cudaCheckError()
cudaFree(dB2_2);
cudaCheckError()
cudaFree(dC4);
cudaCheckError()
cudaStreamDestroy(streams[0]);
cudaStreamDestroy(streams[1]);
cudaStreamDestroy(streams[2]);
cudaStreamDestroy(streams[3]);
return(0);
}
|
0e7d6baeb18895629ae3ea833aa220a220c760a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <hipcub/hipcub.hpp>
namespace caffe2 {
namespace {
// Explicit fast paths for average and max global pooling, working around a cuDNN
// global-pooling performance bug that makes global pooling extremely slow.
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, hipcub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
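// Helper that fills a cuDNN tensor descriptor: 4-D tensors use
// cudnnSetTensor4dDescriptor with the requested layout, while 5-D tensors go through
// cudnnSetTensorNdDescriptor with explicitly computed NCHW/NHWC strides.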
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >=2 && kernel_.size() <=3,
"Cudnn pooling only supports 4d and 5d tensor");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
    // Fast path for global pooling, as cudnn is slow. But only
    // for float, because fp16 is not supported by CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
hipLaunchKernelGGL(( global_maxpool_kernel_NCHW<float>)
, dim3(::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
    // cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
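// In this backward fast path the whole H*W*D plane was pooled to a single
// value per (n, c): the average-pool kernel spreads each output gradient
// uniformly as dY / (H*W*D) over its plane, while the max-pool kernel copies
// dY only to the positions whose input equals the pooled maximum and writes
// zero elsewhere.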
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
hipLaunchKernelGGL(( global_avgpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
hipLaunchKernelGGL(( global_maxpool_backward_NCHW<float>)
, dim3(CAFFE_GET_BLOCKS(dX->size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
| 0e7d6baeb18895629ae3ea833aa220a220c760a4.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include <cub/cub.cuh>
namespace caffe2 {
namespace {
// Explicit fast paths for avg and max global pooling due to CuDNN global
// pooling performance bug which makes pooling extremely slow.
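// For global pooling the output is a single value per (n, c) plane, so the
// kernels below reduce each H*W*D plane with cub::BlockReduce (a sum for
// average pooling, cub::Max for max pooling) instead of going through
// cudnnPoolingForward.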
template <typename T>
__global__ void
global_avgpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T sum(0);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
sum += data[j * sz + k];
}
float totalsum = BlockReduce(temp_storage).Sum(sum);
if (threadIdx.x == 0) {
out[j] = totalsum / sz;
}
__syncthreads();
}
}
template <typename T>
__global__ void
global_avgpool_backward_NCHW(const int NC, const int sz, const T* dx, T* out) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
out[i] = dx[i / sz] / sz;
}
}
template <typename T>
__global__ void
global_maxpool_kernel_NCHW(const int NC, const int sz, const T* data, T* out) {
typedef cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int j = blockIdx.x; j < NC; j += gridDim.x) {
T max(-FLT_MAX);
for (int k = threadIdx.x; k < sz; k += blockDim.x) {
max = data[j * sz + k] > max ? data[j * sz + k] : max;
}
float totalmax = BlockReduce(temp_storage).Reduce(max, cub::Max());
if (threadIdx.x == 0) {
out[j] = totalmax;
}
__syncthreads();
}
}
template <typename T>
__global__ void global_maxpool_backward_NCHW(
const int NC,
const int sz,
const T* dx,
T* out,
const T* x,
const T* in) {
CUDA_1D_KERNEL_LOOP(i, NC * sz) {
if (in[i] == x[i / sz]) {
out[i] = dx[i / sz];
} else {
out[i] = 0.0;
}
}
}
template <typename T>
void setTensorDescriptor(
const int size,
const StorageOrder order,
const int N,
const int C,
const int H,
const int W,
const int D,
cudnnTensorDescriptor_t& desc) {
if (size == 4) {
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
desc,
GetCudnnTensorFormat(order),
cudnnTypeWrapper<T>::type,
N,
C,
H,
W));
} else {
vector<int> dims = {N, C, H, W, D};
vector<int> strides;
order == NCHW
? strides.insert(strides.end(), {C * H * W * D, H * W * D, W * D, D, 1})
: strides.insert(
strides.end(), {H * W * D * C, 1, W * D * C, D * C, C});
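// e.g. with N = 2, C = 3, H = 4, W = 5, D = 6 (illustrative sizes) this
// yields strides {360, 120, 30, 6, 1} for NCHW and {360, 1, 90, 18, 3} for
// NHWC, i.e. the element distance between consecutive indices per dimension.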
CUDNN_ENFORCE(cudnnSetTensorNdDescriptor(
desc,
cudnnTypeWrapper<T>::type,
size > 3 ? size : 4,
dims.data(),
strides.data()));
}
}
} // namespace
class CuDNNPoolOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
OPERATOR_NEEDS_FEATURE(kernel_.size() >= 2 && kernel_.size() <= 3,
"Cudnn pooling only supports 4d and 5d tensors");
if (legacy_pad_ != LegacyPadding::CAFFE_LEGACY_POOLING) {
for (int i = 0; i < kernel_.size(); ++i) {
OPERATOR_NEEDS_FEATURE(
pads_[i] == pads_[kernel_.size() + i],
"The current padding scheme leads to unequal padding on the left "
"and right, which is not supported by cudnn.");
}
}
// Figure out the pooling descriptor.
if (operator_def.type().substr(0, 7) == "MaxPool") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (operator_def.type().substr(0, 11) == "AveragePool") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(1);
W_out = Y->ndim() > 3 ? Y->dim32(2) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
ConvPoolOpBase::SetOutputSize(X, Y, C);
H_out = Y->dim32(2);
W_out = Y->ndim() > 3 ? Y->dim32(3) : 1;
D_out = Y->ndim() > 4 ? Y->dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
if (mode_ == CUDNN_POOLING_MAX) {
global_maxpool_kernel_NCHW<float>
<<<std::min(N * C, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C, H * W * D, X.data<float>(), Y->mutable_data<float>());
return true;
}
}
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y->ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingForward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
top_desc_,
Ydata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto* Y = Output(0);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
private:
};
class CuDNNPoolGradientOp : public ConvPoolOpBase<CUDAContext> {
public:
CuDNNPoolGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&bottom_desc_));
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&top_desc_));
CUDNN_ENFORCE(cudnnCreatePoolingDescriptor(&pooling_desc_));
// Figure out the pooling descriptor.
if (operator_def.type() == "MaxPoolGradient" ||
operator_def.type() == "MaxPool1DGradient" ||
operator_def.type() == "MaxPool2DGradient" ||
operator_def.type() == "MaxPool3DGradient") {
bool deterministic =
OperatorBase::GetSingleArgument<bool>("deterministic", false);
#if CUDNN_VERSION_MIN(6, 0, 0)
mode_ =
deterministic ? CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
#else
mode_ = CUDNN_POOLING_MAX;
#endif
} else if (
operator_def.type() == "AveragePoolGradient" ||
operator_def.type() == "AveragePool1DGradient" ||
operator_def.type() == "AveragePool2DGradient" ||
operator_def.type() == "AveragePool3DGradient") {
mode_ = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
LOG(FATAL) << "Unsupported pooling method: " << operator_def.type();
}
}
~CuDNNPoolGradientOp() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(bottom_desc_));
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(top_desc_));
CUDNN_ENFORCE(cudnnDestroyPoolingDescriptor(pooling_desc_));
}
template <typename T, typename M>
bool DoRunWithType() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
// cuDNN pooling supports only 2 and 3 spatial dimensions.
CAFFE_ENFORCE(X.ndim() >= 4 && X.ndim() <= 5);
dX->ResizeLike(X);
int N = 0, C = 0, H = 0, W = 0, D = 0;
int H_out = 0, W_out = 0, D_out = 0;
switch (order_) {
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.ndim() > 3 ? X.dim32(2) : 1;
D = X.ndim() > 4 ? X.dim32(3) : 1;
C = X.dim32(X.ndim() - 1);
H_out = Y.dim32(1);
W_out = Y.ndim() > 3 ? Y.dim32(2) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(3) : 1;
break;
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.ndim() > 3 ? X.dim32(3) : 1;
D = X.ndim() > 4 ? X.dim32(4) : 1;
H_out = Y.dim32(2);
W_out = Y.ndim() > 3 ? Y.dim32(3) : 1;
D_out = Y.ndim() > 4 ? Y.dim32(4) : 1;
break;
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// Fast path for global pooling, as cudnn is slow. But only
// on float, because fp16 not supported for CUB.
if (std::is_same<T, float>::value) {
if (order_ == StorageOrder::NCHW && global_pooling_) {
if (mode_ == CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING) {
global_avgpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>());
return true;
}
#if CUDNN_VERSION_MIN(6, 0, 0)
if (mode_ == CUDNN_POOLING_MAX ||
mode_ == CUDNN_POOLING_MAX_DETERMINISTIC) {
#else
if (mode_ == CUDNN_POOLING_MAX) {
#endif
global_maxpool_backward_NCHW<float>
<<<CAFFE_GET_BLOCKS(dX->size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N * C,
H * W * D,
dY.data<float>(),
dX->mutable_data<float>(),
Y.data<float>(),
X.data<float>());
return true;
}
}
}
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W});
} else if (kernel_.size() == 3) {
ConvPoolOpBase<CUDAContext>::ComputePads({H, W, D});
} else {
CAFFE_THROW("Unsupported kernel size :", kernel_.size());
}
if (cudnn_input_dims_ != X.dims()) {
// Dimensions changed; we will need to re-initialize things.
VLOG(1) << "Changing the cudnn descriptor configurations.";
cudnn_input_dims_ = X.dims();
setTensorDescriptor<T>(X.ndim(), order_, N, C, H, W, D, bottom_desc_);
setTensorDescriptor<T>(
Y.ndim(), order_, N, C, H_out, W_out, D_out, top_desc_);
for (int i = 0; i < kernel_.size(); ++i) {
if (pads_[i] != pads_[kernel_.size() + i]) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::CAFFE_LEGACY_POOLING,
"Cudnn pooling only supports even padding on both sides, with "
"the only exception of the caffe legacy pooling case where we "
"try to preserve backward compatibility with Caffe.");
}
}
if (kernel_.size() == 2) {
CUDNN_ENFORCE(cudnnSetPooling2dDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_h(),
kernel_w(),
pad_t(),
pad_l(),
stride_h(),
stride_w()));
} else {
CUDNN_ENFORCE(cudnnSetPoolingNdDescriptor(
pooling_desc_,
mode_,
CUDNN_NOT_PROPAGATE_NAN,
kernel_.size(),
kernel_.data(),
pads_.data(),
stride_.data()));
}
}
// Carry out the pooling computation.
const T* Xdata = X.template data<T>();
const T* Ydata = Y.template data<T>();
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
CUDNN_ENFORCE(cudnnPoolingBackward(
cudnn_wrapper_.inline_cudnn_handle(),
pooling_desc_,
cudnnTypeWrapper<T>::kOne(),
top_desc_,
Ydata,
top_desc_,
dYdata,
bottom_desc_,
Xdata,
cudnnTypeWrapper<T>::kZero(),
bottom_desc_,
dXdata));
return true;
}
bool RunOnDevice() final {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
auto* dX = Output(0);
dX->ResizeLike(X);
if (X.IsType<float>()) {
return DoRunWithType<float, float>();
} else if (X.IsType<float16>()) {
return DoRunWithType<float16, float>();
} else {
LOG(FATAL) << "Unsupported input types";
}
return true;
}
protected:
vector<TIndex> cudnn_input_dims_;
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t bottom_desc_;
cudnnTensorDescriptor_t top_desc_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnPoolingMode_t mode_;
};
namespace {
REGISTER_CUDNN_OPERATOR(AveragePool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(AveragePool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(AveragePool3DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPoolGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool1D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool1DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool2D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool2DGradient, CuDNNPoolGradientOp);
REGISTER_CUDNN_OPERATOR(MaxPool3D, CuDNNPoolOp);
REGISTER_CUDNN_OPERATOR(MaxPool3DGradient, CuDNNPoolGradientOp);
} // namespace
} // namespace caffe2
|
6212aef98ece50fbc6ee76c0713ab457dac5e0fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/tensorrt/plugins/kernels.h"
namespace apollo {
namespace perception {
namespace inference {
// Decode bbox.
// boxes dims: [num_box, 4], deltas dims: [N, num_box, C, 4],
// out_boxes dims: [N, num_box, C, 4]
// nthreads = N * num_box * C
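// Worked example with illustrative numbers: a box (10, 10, 19, 29) has
// w = 10, h = 20 and center (14.5, 19.5); deltas (dx, dy, dw, dh) =
// (0.1, 0, log(2), 0) shift the center to (15.5, 19.5) and scale the width
// to exp(log(2)) * 10 = 20, so the decoded box is (6, 10, 25, 29).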
__global__ void bbox_transform_inv_kernel(
const int nthreads, const float *boxes, const float *deltas,
const int num_box, const int num_channel, float *out_boxes) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int box_id = (index / num_channel) % num_box;
float x_min = boxes[box_id * 4];
float y_min = boxes[box_id * 4 + 1];
float x_max = boxes[box_id * 4 + 2];
float y_max = boxes[box_id * 4 + 3];
float w = x_max - x_min + 1;
float h = y_max - y_min + 1;
float x_ctr = x_min + 0.5 * (w - 1);
float y_ctr = y_min + 0.5 * (h - 1);
float dx = deltas[index * 4];
float dy = deltas[index * 4 + 1];
float dw = deltas[index * 4 + 2];
float dh = deltas[index * 4 + 3];
float pred_x_ctr = dx * w + x_ctr;
float pred_y_ctr = dy * h + y_ctr;
float pred_w = ::exp(dw) * w;
float pred_h = ::exp(dh) * h;
out_boxes[index * 4] = pred_x_ctr - 0.5 * (pred_w - 1); // pred x_min
out_boxes[index * 4 + 1] = pred_y_ctr - 0.5 * (pred_h - 1); // pred y_min
out_boxes[index * 4 + 2] = pred_x_ctr + 0.5 * (pred_w - 1); // pred x_max
out_boxes[index * 4 + 3] = pred_y_ctr + 0.5 * (pred_h - 1); // pred y_max
}
// boxes dim: [N, num_box, 4], nthreads = N * num_box * 4
__global__ void clip_boxes_kernel(const int nthreads, float *boxes,
const float height, const float width) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
// refine x_min, x_max to be in [0, img_width)
if (index % 4 == 0 || index % 4 == 2) {
if (boxes[index] < 0) {
boxes[index] = 0;
} else if (boxes[index] > width - 1) {
boxes[index] = width - 1;
}
} else { // refine y_min, y_max to be in [0, img_height)
if (boxes[index] < 0) {
boxes[index] = 0;
} else if (boxes[index] > height - 1) {
boxes[index] = height - 1;
}
}
}
// boxes dims: [N, num_box, num_channel, 4],
// filtered_boxes dims: [N, num_box, 4]
// scores dims: [N, num_box, num_class], filtered_scores dims: [N, num_box]
// all_probs dims: [N, num_box, num_prob],
// filtered_all_probs dims: [N, num_box, num_prob]
// filtered_count dims: [N]
__global__ void filter_boxes_kernel(
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int num_box, const int num_channel,
const int num_class, const int num_prob, const int filter_channel,
const int filter_class, const int min_size_mode, const float min_size_h,
const float min_size_w, const float threshold_score, float *filtered_boxes,
float *filtered_scores, float *filtered_all_probs, int *filtered_count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int batch_id = index / num_box;
if (scores[index * num_class + filter_class] > threshold_score) {
bool keep = true;
int box_id = index * num_channel + filter_channel;
float w = boxes[box_id * 4 + 2] - boxes[box_id * 4] + 1;
float h = boxes[box_id * 4 + 3] - boxes[box_id * 4 + 1] + 1;
if (min_size_mode == 0) {
// filter boxes with minimum size of height & width
if (h < min_size_h || w < min_size_w) {
keep = false;
}
} else if (min_size_mode == 1) {
// filter boxes with minimum size of height or width
if (h < min_size_h && w < min_size_w) {
keep = false;
}
}
if (keep) {
int counter = atomicAdd(&filtered_count[batch_id], 1);
for (int i = 0; i < 4; ++i) {
filtered_boxes[batch_id * num_box * 4 + counter * 4 + i] =
boxes[box_id * 4 + i];
}
filtered_scores[batch_id * num_box + counter] =
scores[index * num_class + filter_class];
if (all_probs != nullptr && filtered_all_probs != nullptr) {
for (int i = 0; i < num_prob; ++i) {
filtered_all_probs[batch_id * num_box * num_prob +
counter * num_prob + i] =
all_probs[index * num_prob + i];
}
}
}
}
}
// Gather boxes by indexes and keep top N boxes.
// boxes dims: [N, num_box, 4], scores dims: [N, num_box],
// all_probs dims: [N, num_box, num_prob]
// indexes dims: [N, num_box], count dims: [N]
// out_boxes dims: [N, topN, 4], out_scores dims: [N, topN]
// out_all_probs dims: [N, topN, num_prob]
// nthreads = N * max_num_box
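// Each thread fills one (batch, output slot) pair: slot box_id is written
// only while box_id < count[batch_id], by copying the box whose original
// index is indexes[batch_id * num_box + box_id]; slots beyond count[batch_id]
// (up to topN) are left untouched.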
__global__ void keep_topN_boxes_kernel(
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int *indexes, const int *count,
const bool keep_score, const int num_box, const int num_prob,
const int topN, float *out_boxes, float *out_scores, float *out_all_probs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int batch_id = index / topN;
int box_id = index % topN;
if (box_id < count[batch_id]) {
int in_box_id = indexes[batch_id * num_box + box_id];
for (int i = 0; i < 4; ++i) {
out_boxes[index * 4 + i] =
boxes[batch_id * num_box * 4 + in_box_id * 4 + i];
}
if (keep_score) {
out_scores[index] = scores[batch_id * num_box + in_box_id];
for (int i = 0; i < num_prob; ++i) {
out_all_probs[index * num_prob + i] =
all_probs[batch_id * num_box * num_prob + in_box_id * num_prob + i];
}
}
}
}
__global__ void repeatedly_add_kernel(const int nthreads, const float *in_data,
float *out_data, const float *add_vec,
int add_vec_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
out_data[index] = in_data[index] + add_vec[index % add_vec_size];
}
}
__global__ void repeatedly_mul_kernel(const int nthreads, const float *in_data,
float *out_data, const float *mul_vec,
int mul_vec_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
out_data[index] = in_data[index] * mul_vec[index % mul_vec_size];
}
}
// input dims: [N, C], output dims: [N, C_sliced]
__global__ void slice2d_kernel(const int nthreads, const float *in_data,
float *out_data, const int *slice_axises,
int slice_axis_num, int input_axis_size) {
int out_index = threadIdx.x + blockIdx.x * blockDim.x;
if (out_index < nthreads) {
int id = out_index / slice_axis_num;
int slice_axis_id = out_index % slice_axis_num;
int in_index = slice_axises[slice_axis_id] + id * input_axis_size;
out_data[out_index] = in_data[in_index];
}
}
void bbox_transform_inv_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads,
const float *boxes, const float *deltas,
const int num_box, const int num_channel,
float *out_boxes) {
hipLaunchKernelGGL(( bbox_transform_inv_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, boxes, deltas, num_box, num_channel, out_boxes);
}
void clip_boxes_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads, float *boxes,
const float height, const float width) {
hipLaunchKernelGGL(( clip_boxes_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, boxes, height, width);
}
void filter_boxes_cuda(
int block_size, int thread_size, int shared_mem, hipStream_t stream,
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int num_box, const int num_channel,
const int num_class, const int num_prob, const int filter_channel,
const int filter_class, const int min_size_mode, const float min_size_h,
const float min_size_w, const float threshold_score, float *filtered_boxes,
float *filtered_scores, float *filtered_all_probs, int *filtered_count) {
hipLaunchKernelGGL(( filter_boxes_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, boxes, scores, all_probs, num_box, num_channel, num_class,
num_prob, filter_channel, filter_class, min_size_mode, min_size_h,
min_size_w, threshold_score, filtered_boxes, filtered_scores,
filtered_all_probs, filtered_count);
}
void keep_topN_boxes_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads,
const float *boxes, const float *scores,
const float *all_probs, const int *indexes,
const int *count, const bool keep_score,
const int num_box, const int num_prob, const int topN,
float *out_boxes, float *out_scores,
float *out_all_probs) {
hipLaunchKernelGGL(( keep_topN_boxes_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, boxes, scores, all_probs, indexes, count, keep_score, num_box,
num_prob, topN, out_boxes, out_scores, out_all_probs);
}
void repeatedly_add_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads,
const float *in_data, float *out_data,
const float *add_vec, int add_vec_size) {
hipLaunchKernelGGL(( repeatedly_add_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, in_data, out_data, add_vec, add_vec_size);
}
void repeatedly_mul_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads,
const float *in_data, float *out_data,
const float *mul_vec, int mul_vec_size) {
hipLaunchKernelGGL(( repeatedly_mul_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, in_data, out_data, mul_vec, mul_vec_size);
}
void slice2d_cuda(int block_size, int thread_size, int shared_mem,
hipStream_t stream, const int nthreads, const float *in_data,
float *out_data, const int *slice_axises, int slice_axis_num,
int input_axis_size) {
hipLaunchKernelGGL(( slice2d_kernel), dim3(block_size), dim3(thread_size), shared_mem, stream,
nthreads, in_data, out_data, slice_axises, slice_axis_num,
input_axis_size);
}
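// Minimal host-side sketch of how these wrappers are typically driven; the
// launch configuration below is an illustrative assumption, not a value
// taken from this file:
//
// const int nthreads = N * num_box * num_channel;
// const int thread_size = 512;
// const int block_size = (nthreads + thread_size - 1) / thread_size;
// bbox_transform_inv_cuda(block_size, thread_size, 0 /*shared_mem*/, stream,
// nthreads, d_boxes, d_deltas, num_box, num_channel,
// d_out_boxes);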
} // namespace inference
} // namespace perception
} // namespace apollo
| 6212aef98ece50fbc6ee76c0713ab457dac5e0fb.cu | /******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/perception/inference/tensorrt/plugins/kernels.h"
namespace apollo {
namespace perception {
namespace inference {
// Decode bbox.
// boxes dims: [num_box, 4], deltas dims: [N, num_box, C, 4],
// out_boxes dims: [N, num_box, C, 4]
// nthreads = N * num_box * C
__global__ void bbox_transform_inv_kernel(
const int nthreads, const float *boxes, const float *deltas,
const int num_box, const int num_channel, float *out_boxes) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int box_id = (index / num_channel) % num_box;
float x_min = boxes[box_id * 4];
float y_min = boxes[box_id * 4 + 1];
float x_max = boxes[box_id * 4 + 2];
float y_max = boxes[box_id * 4 + 3];
float w = x_max - x_min + 1;
float h = y_max - y_min + 1;
float x_ctr = x_min + 0.5 * (w - 1);
float y_ctr = y_min + 0.5 * (h - 1);
float dx = deltas[index * 4];
float dy = deltas[index * 4 + 1];
float dw = deltas[index * 4 + 2];
float dh = deltas[index * 4 + 3];
float pred_x_ctr = dx * w + x_ctr;
float pred_y_ctr = dy * h + y_ctr;
float pred_w = std::exp(dw) * w;
float pred_h = std::exp(dh) * h;
out_boxes[index * 4] = pred_x_ctr - 0.5 * (pred_w - 1); // pred x_min
out_boxes[index * 4 + 1] = pred_y_ctr - 0.5 * (pred_h - 1); // pred y_min
out_boxes[index * 4 + 2] = pred_x_ctr + 0.5 * (pred_w - 1); // pred x_max
out_boxes[index * 4 + 3] = pred_y_ctr + 0.5 * (pred_h - 1); // pred y_max
}
// boxes dim: [N, num_box, 4], nthreads = N * num_box * 4
__global__ void clip_boxes_kernel(const int nthreads, float *boxes,
const float height, const float width) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
// refine x_min, x_max to be in [0, img_width)
if (index % 4 == 0 || index % 4 == 2) {
if (boxes[index] < 0) {
boxes[index] = 0;
} else if (boxes[index] > width - 1) {
boxes[index] = width - 1;
}
} else { // refine y_min, y_max to be in [0, img_height)
if (boxes[index] < 0) {
boxes[index] = 0;
} else if (boxes[index] > height - 1) {
boxes[index] = height - 1;
}
}
}
// boxes dims: [N, num_box, num_channel, 4],
// filtered_boxes dims: [N, num_box, 4]
// scores dims: [N, num_box, num_class], filtered_scores dims: [N, num_box]
// all_probs dims: [N, num_box, num_prob],
// filtered_all_probs dims: [N, num_box, num_prob]
// filtered_count dims: [N]
__global__ void filter_boxes_kernel(
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int num_box, const int num_channel,
const int num_class, const int num_prob, const int filter_channel,
const int filter_class, const int min_size_mode, const float min_size_h,
const float min_size_w, const float threshold_score, float *filtered_boxes,
float *filtered_scores, float *filtered_all_probs, int *filtered_count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int batch_id = index / num_box;
if (scores[index * num_class + filter_class] > threshold_score) {
bool keep = true;
int box_id = index * num_channel + filter_channel;
float w = boxes[box_id * 4 + 2] - boxes[box_id * 4] + 1;
float h = boxes[box_id * 4 + 3] - boxes[box_id * 4 + 1] + 1;
if (min_size_mode == 0) {
// filter boxes with minimum size of height & width
if (h < min_size_h || w < min_size_w) {
keep = false;
}
} else if (min_size_mode == 1) {
// filter boxes with minimum size of height or width
if (h < min_size_h && w < min_size_w) {
keep = false;
}
}
if (keep) {
int counter = atomicAdd(&filtered_count[batch_id], 1);
for (int i = 0; i < 4; ++i) {
filtered_boxes[batch_id * num_box * 4 + counter * 4 + i] =
boxes[box_id * 4 + i];
}
filtered_scores[batch_id * num_box + counter] =
scores[index * num_class + filter_class];
if (all_probs != nullptr && filtered_all_probs != nullptr) {
for (int i = 0; i < num_prob; ++i) {
filtered_all_probs[batch_id * num_box * num_prob +
counter * num_prob + i] =
all_probs[index * num_prob + i];
}
}
}
}
}
// Gather boxes by indexes and keep top N boxes.
// boxes dims: [N, num_box, 4], scores dims: [N, num_box],
// all_probs dims: [N, num_box, num_prob]
// indexes dims: [N, num_box], count dims: [N]
// out_boxes dims: [N, topN, 4], out_scores dims: [N, topN]
// out_all_probs dims: [N, topN, num_prob]
// nthreads = N * max_num_box
__global__ void keep_topN_boxes_kernel(
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int *indexes, const int *count,
const bool keep_score, const int num_box, const int num_prob,
const int topN, float *out_boxes, float *out_scores, float *out_all_probs) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= nthreads) {
return;
}
int batch_id = index / topN;
int box_id = index % topN;
if (box_id < count[batch_id]) {
int in_box_id = indexes[batch_id * num_box + box_id];
for (int i = 0; i < 4; ++i) {
out_boxes[index * 4 + i] =
boxes[batch_id * num_box * 4 + in_box_id * 4 + i];
}
if (keep_score) {
out_scores[index] = scores[batch_id * num_box + in_box_id];
for (int i = 0; i < num_prob; ++i) {
out_all_probs[index * num_prob + i] =
all_probs[batch_id * num_box * num_prob + in_box_id * num_prob + i];
}
}
}
}
__global__ void repeatedly_add_kernel(const int nthreads, const float *in_data,
float *out_data, const float *add_vec,
int add_vec_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
out_data[index] = in_data[index] + add_vec[index % add_vec_size];
}
}
__global__ void repeatedly_mul_kernel(const int nthreads, const float *in_data,
float *out_data, const float *mul_vec,
int mul_vec_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < nthreads) {
out_data[index] = in_data[index] * mul_vec[index % mul_vec_size];
}
}
// input dims: [N, C], output dims: [N, C_sliced]
__global__ void slice2d_kernel(const int nthreads, const float *in_data,
float *out_data, const int *slice_axises,
int slice_axis_num, int input_axis_size) {
int out_index = threadIdx.x + blockIdx.x * blockDim.x;
if (out_index < nthreads) {
int id = out_index / slice_axis_num;
int slice_axis_id = out_index % slice_axis_num;
int in_index = slice_axises[slice_axis_id] + id * input_axis_size;
out_data[out_index] = in_data[in_index];
}
}
void bbox_transform_inv_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads,
const float *boxes, const float *deltas,
const int num_box, const int num_channel,
float *out_boxes) {
bbox_transform_inv_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, boxes, deltas, num_box, num_channel, out_boxes);
}
void clip_boxes_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads, float *boxes,
const float height, const float width) {
clip_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, boxes, height, width);
}
void filter_boxes_cuda(
int block_size, int thread_size, int shared_mem, cudaStream_t stream,
const int nthreads, const float *boxes, const float *scores,
const float *all_probs, const int num_box, const int num_channel,
const int num_class, const int num_prob, const int filter_channel,
const int filter_class, const int min_size_mode, const float min_size_h,
const float min_size_w, const float threshold_score, float *filtered_boxes,
float *filtered_scores, float *filtered_all_probs, int *filtered_count) {
filter_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, boxes, scores, all_probs, num_box, num_channel, num_class,
num_prob, filter_channel, filter_class, min_size_mode, min_size_h,
min_size_w, threshold_score, filtered_boxes, filtered_scores,
filtered_all_probs, filtered_count);
}
void keep_topN_boxes_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads,
const float *boxes, const float *scores,
const float *all_probs, const int *indexes,
const int *count, const bool keep_score,
const int num_box, const int num_prob, const int topN,
float *out_boxes, float *out_scores,
float *out_all_probs) {
keep_topN_boxes_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, boxes, scores, all_probs, indexes, count, keep_score, num_box,
num_prob, topN, out_boxes, out_scores, out_all_probs);
}
void repeatedly_add_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads,
const float *in_data, float *out_data,
const float *add_vec, int add_vec_size) {
repeatedly_add_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, in_data, out_data, add_vec, add_vec_size);
}
void repeatedly_mul_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads,
const float *in_data, float *out_data,
const float *mul_vec, int mul_vec_size) {
repeatedly_mul_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, in_data, out_data, mul_vec, mul_vec_size);
}
void slice2d_cuda(int block_size, int thread_size, int shared_mem,
cudaStream_t stream, const int nthreads, const float *in_data,
float *out_data, const int *slice_axises, int slice_axis_num,
int input_axis_size) {
slice2d_kernel<<<block_size, thread_size, shared_mem, stream>>>(
nthreads, in_data, out_data, slice_axises, slice_axis_num,
input_axis_size);
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
a2b02dbea4988bfebe55e7c43b96ab599509cd8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <cassert>
// CUDA runtime
// #include <hip/hip_runtime.h>
#define cimg_use_jpeg
#include "../cimg/CImg-2.8.3/CImg.h"
#include "../cpp/convolutionSeparable_gold.hpp"
#define RUNTIME_ENABLE_JIT
#include <anydsl_runtime.h>
// Generated from convolutionSeparable.impala
#include "convolutionSeparable.inc"
//timers
#include "../include/timer.h"
//arg parsing
#include "../include/cxxopts.hpp"
int main(int argc, char** argv) {
cxxopts::Options options("as", " - example command line options");
options.add_options()("f,filename","path to image to convolve",cxxopts::value<std::string>())
("o,outfile","path to save convolved image",cxxopts::value<std::string>())
("i,isize","size of the image to generate : isize x isize",cxxopts::value<int>())
("s,fsize","size of the filter to convolve with",cxxopts::value<int>())
("c,static","whether to use static filters or not : 0 for not, 1 is default",cxxopts::value<int>())
("t,test","assert correctness of the filter",cxxopts::value<int>());
auto result = options.parse(argc, argv);
int flag = 0;
int test = 0;
int KERNEL_LENGTH = 3;
int imageH = 0;
int imageW = 0;
if(result.count("fsize")){
KERNEL_LENGTH = result["fsize"].as<int>();
assert(KERNEL_LENGTH % 2 == 1);
}else{
std::cout << "filter size if required" << "\n";
return 0;
}
std::string img_path;
int KERNEL_RADIUS = (KERNEL_LENGTH - 1) / 2;
int image = 0;
if(result.count("filename")){
img_path = result["filename"].as<std::string>();
image = 1;
}else if(result.count("isize")){
imageH = imageW = result["isize"].as<int>();
}else{
std::cout << "Either input image or its size is required" << "\n";
return 0;
}
if(result.count("static")){
flag = result["static"].as<int>();
}
if(result.count("test")){
test = result["test"].as<int>();
}
int iterations = 1;
// cimg_library::CImg<float> img1("/home/alekseytyurinspb_gmail_com/specialization/spec/convolution/images/graytussaint100.jpg");
srand(200);
float* h_Input;
if(image){
cimg_library::CImg<float> img1(img_path.c_str());
imageW = img1.width();
imageH = img1.height();
h_Input = new float [imageH * imageW];
for (int i = 0; i < imageW * imageH; i++)
{
h_Input[i] = img1.data()[i];
}
}else{
long size = imageH * imageW;
h_Input = new float [size];
for (long i = 0; i < imageW * imageH; i++)
{
h_Input[i] = (float)(rand() % 16);
}
}
std::cout << "image size is " << imageW << "x" << imageH <<"\n";
float* h_Kernel = new float[KERNEL_LENGTH];
float* h_Output = new float[imageW * imageH];
float *d_Input,
*d_Buffer,
*d_Output;
size_t pitch;
hipMallocPitch((void**)&d_Input,&pitch,imageW * sizeof(float),imageH);
hipMallocPitch((void**)&d_Buffer,&pitch,imageW * sizeof(float),imageH);
hipMallocPitch((void**)&d_Output,&pitch,imageW * sizeof(float),imageH);
hipMemcpy2D(d_Input, pitch, h_Input, imageW*sizeof(float), imageW*sizeof(float), imageH, hipMemcpyHostToDevice);
for (unsigned int i = 0; i < KERNEL_LENGTH; i++) {
h_Kernel[i] = (float)(rand() % 16);
}
std::string kernel_string;
for (int i = 0; i < KERNEL_LENGTH - 1; i++) {
kernel_string += std::to_string(h_Kernel[i]);
kernel_string += "f32, ";
}
kernel_string += std::to_string(h_Kernel[KERNEL_LENGTH - 1]) + "f32";
int block_sizeX = 32;
int block_sizeY = 16;
int result_step = 8;
if(KERNEL_LENGTH <= 63 ){ //radius < 31
block_sizeX = 32;
block_sizeY = 16;
}else if(KERNEL_LENGTH <= 127){ //radius is 63
block_sizeX = 64;
block_sizeY = 8;
}else if (KERNEL_LENGTH <= 255){
block_sizeX = 128;
block_sizeY = 4;
}else{
std::cout << "Too huge kernel length, maximum supported is 255" << "\n";
return 0;
}
std::string dummy = "extern fn dummy(d_Src: &[f32],d_Buf : &mut[f32],d_Dst: &mut[f32])-> (){\n";
dummy += " convolveImpala(d_Src, d_Buf, d_Dst, [" +
kernel_string + "], " +
std::to_string((KERNEL_LENGTH - 1) / 2) + "i32, " +
std::to_string(imageH) + "i32, " +
std::to_string(imageW) + "i32, " +
std::to_string(pitch / sizeof(float)) + "i32, " +
std::to_string(block_sizeX) + "i32, " +
std::to_string(block_sizeY) + "i32, " +
std::to_string(result_step) + "i32)\n }";
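// For example, with a 3-tap kernel {k0, k1, k2}, a 512x512 image and a pitch
// of 512 floats (illustrative values), the generated Impala wrapper reads
// roughly:
//
// extern fn dummy(d_Src: &[f32], d_Buf: &mut[f32], d_Dst: &mut[f32]) -> () {
// convolveImpala(d_Src, d_Buf, d_Dst, [k0f32, k1f32, k2f32], 1i32,
// 512i32, 512i32, 512i32, 32i32, 16i32, 8i32)
// }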
std::string program = std::string((char*)convolutionSeparable_impala) + dummy;
std::cout << "Compiling ..." << "\n";
am::timer time;
time.start();
auto key = anydsl_compile(program.c_str(),program.size(),0);
time.stop();
std::cout << "compilation time " << time.milliseconds() << std::endl;
time.reset();
typedef void (*function) (const float*,const float* ,const float *);
auto call = reinterpret_cast<function>(anydsl_lookup_function(key,"dummy"));
if (call == nullptr) {
std::cout << "compilation failed\n";
return 0;
} else {
std::cout << "succesfully compiled\n";
}
for (int j = 0; j < iterations; j++){
call(d_Input,d_Buffer,d_Output);
}
hipDeviceSynchronize();
hipMemcpy2D(h_Output, imageW * sizeof(float), d_Output, pitch, imageW*sizeof(float), imageH, hipMemcpyDeviceToHost);
//gold
if(test){
float* h_OutputGold = new float[imageW * imageH];
float* h_BufferGold = new float[imageW * imageH];
convolutionRowCPU(h_BufferGold,h_Input,h_Kernel,imageW,imageH,(KERNEL_LENGTH - 1) /2);
convolutionColumnCPU(h_OutputGold,h_BufferGold,h_Kernel,imageW,imageH,(KERNEL_LENGTH - 1) /2);
for (long i = 0; i < imageH * imageW; i++) {
assert(h_OutputGold[i] == h_Output[i]);
}
delete[] (h_OutputGold);
delete[] (h_BufferGold);
}
// cimg_library::CImg<float> output(h_Output,img1.width(),img1.height(),1,1);
// cimg_library::CImg<float> convolved(h_OutputGold,img1.width(),img1.height(),1,1);
//Tests whether convolution is correct
// assert(convolved == output);
// output.save("impala-convolved.jpg");
// convolved.save("manually-convolved.jpg");
// std::cout << "pitch = " << pitch << "\n";
delete[] (h_Input);
delete[] (h_Kernel);
delete[] (h_Output);
hipFree(d_Input);
hipFree(d_Buffer);
hipFree(d_Output);
} | a2b02dbea4988bfebe55e7c43b96ab599509cd8e.cu | #include <iostream>
#include <cstdlib>
#include <cassert>
// CUDA runtime
// #include <cuda_runtime.h>
#define cimg_use_jpeg
#include "../cimg/CImg-2.8.3/CImg.h"
#include "../cpp/convolutionSeparable_gold.hpp"
#define RUNTIME_ENABLE_JIT
#include <anydsl_runtime.h>
// Generated from convolutionSeparable.impala
#include "convolutionSeparable.inc"
//timers
#include "../include/timer.h"
//arg parsing
#include "../include/cxxopts.hpp"
int main(int argc, char** argv) {
cxxopts::Options options("as", " - example command line options");
options.add_options()("f,filename","path to image to convolve",cxxopts::value<std::string>())
("o,outfile","path to save convolved image",cxxopts::value<std::string>())
("i,isize","size of the image to generate : isize x isize",cxxopts::value<int>())
("s,fsize","size of the filter to convolve with",cxxopts::value<int>())
("c,static","whether to use static filters or not : 0 for not, 1 is default",cxxopts::value<int>())
("t,test","assert correctness of the filter",cxxopts::value<int>());
auto result = options.parse(argc, argv);
int flag = 0;
int test = 0;
int KERNEL_LENGTH = 3;
int imageH = 0;
int imageW = 0;
if(result.count("fsize")){
KERNEL_LENGTH = result["fsize"].as<int>();
assert(KERNEL_LENGTH % 2 == 1);
}else{
std::cout << "filter size if required" << "\n";
return 0;
}
std::string img_path;
int KERNEL_RADIUS = (KERNEL_LENGTH - 1) / 2;
int image = 0;
if(result.count("filename")){
img_path = result["filename"].as<std::string>();
image = 1;
}else if(result.count("isize")){
imageH = imageW = result["isize"].as<int>();
}else{
std::cout << "Either input image or its size is required" << "\n";
return 0;
}
if(result.count("static")){
flag = result["static"].as<int>();
}
if(result.count("test")){
test = result["test"].as<int>();
}
int iterations = 1;
// cimg_library::CImg<float> img1("/home/alekseytyurinspb_gmail_com/specialization/spec/convolution/images/graytussaint100.jpg");
srand(200);
float* h_Input;
if(image){
cimg_library::CImg<float> img1(img_path.c_str());
imageW = img1.width();
imageH = img1.height();
h_Input = new float [imageH * imageW];
for (int i = 0; i < imageW * imageH; i++)
{
h_Input[i] = img1.data()[i];
}
}else{
long size = imageH * imageW;
h_Input = new float [size];
for (long i = 0; i < imageW * imageH; i++)
{
h_Input[i] = (float)(rand() % 16);
}
}
std::cout << "image size is " << imageW << "x" << imageH <<"\n";
float* h_Kernel = new float[KERNEL_LENGTH];
float* h_Output = new float[imageW * imageH];
float *d_Input,
*d_Buffer,
*d_Output;
size_t pitch;
cudaMallocPitch((void**)&d_Input,&pitch,imageW * sizeof(float),imageH);
cudaMallocPitch((void**)&d_Buffer,&pitch,imageW * sizeof(float),imageH);
cudaMallocPitch((void**)&d_Output,&pitch,imageW * sizeof(float),imageH);
cudaMemcpy2D(d_Input, pitch, h_Input, imageW*sizeof(float), imageW*sizeof(float), imageH, cudaMemcpyHostToDevice);
for (unsigned int i = 0; i < KERNEL_LENGTH; i++) {
h_Kernel[i] = (float)(rand() % 16);
}
std::string kernel_string;
for (int i = 0; i < KERNEL_LENGTH - 1; i++) {
kernel_string += std::to_string(h_Kernel[i]);
kernel_string += "f32, ";
}
kernel_string += std::to_string(h_Kernel[KERNEL_LENGTH - 1]) + "f32";
int block_sizeX = 32;
int block_sizeY = 16;
int result_step = 8;
if(KERNEL_LENGTH <= 63 ){ //radius < 31
block_sizeX = 32;
block_sizeY = 16;
}else if(KERNEL_LENGTH <= 127){ //radius is 63
block_sizeX = 64;
block_sizeY = 8;
}else if (KERNEL_LENGTH <= 255){
block_sizeX = 128;
block_sizeY = 4;
}else{
std::cout << "Too huge kernel length, maximum supported is 255" << "\n";
return 0;
}
std::string dummy = "extern fn dummy(d_Src: &[f32],d_Buf : &mut[f32],d_Dst: &mut[f32])-> (){\n";
dummy += " convolveImpala(d_Src, d_Buf, d_Dst, [" +
kernel_string + "], " +
std::to_string((KERNEL_LENGTH - 1) / 2) + "i32, " +
std::to_string(imageH) + "i32, " +
std::to_string(imageW) + "i32, " +
std::to_string(pitch / sizeof(float)) + "i32, " +
std::to_string(block_sizeX) + "i32, " +
std::to_string(block_sizeY) + "i32, " +
std::to_string(result_step) + "i32)\n }";
std::string program = std::string((char*)convolutionSeparable_impala) + dummy;
std::cout << "Compiling ..." << "\n";
am::timer time;
time.start();
auto key = anydsl_compile(program.c_str(),program.size(),0);
time.stop();
std::cout << "compilation time " << time.milliseconds() << std::endl;
time.reset();
typedef void (*function) (const float*,const float* ,const float *);
auto call = reinterpret_cast<function>(anydsl_lookup_function(key,"dummy"));
if (call == nullptr) {
std::cout << "compilation failed\n";
return 0;
} else {
std::cout << "succesfully compiled\n";
}
for (int j = 0; j < iterations; j++){
call(d_Input,d_Buffer,d_Output);
}
cudaDeviceSynchronize();
cudaMemcpy2D(h_Output, imageW * sizeof(float), d_Output, pitch, imageW*sizeof(float), imageH, cudaMemcpyDeviceToHost);
//gold
if(test){
float* h_OutputGold = new float[imageW * imageH];
float* h_BufferGold = new float[imageW * imageH];
convolutionRowCPU(h_BufferGold,h_Input,h_Kernel,imageW,imageH,(KERNEL_LENGTH - 1) /2);
convolutionColumnCPU(h_OutputGold,h_BufferGold,h_Kernel,imageW,imageH,(KERNEL_LENGTH - 1) /2);
for (long i = 0; i < imageH * imageW; i++) {
assert(h_OutputGold[i] == h_Output[i]);
}
delete[] (h_OutputGold);
delete[] (h_BufferGold);
}
// cimg_library::CImg<float> output(h_Output,img1.width(),img1.height(),1,1);
// cimg_library::CImg<float> convolved(h_OutputGold,img1.width(),img1.height(),1,1);
//Tests whether convolution is correct
// assert(convolved == output);
// output.save("impala-convolved.jpg");
// convolved.save("manually-convolved.jpg");
// std::cout << "pitch = " << pitch << "\n";
delete[] (h_Input);
delete[] (h_Kernel);
delete[] (h_Output);
cudaFree(d_Input);
cudaFree(d_Buffer);
cudaFree(d_Output);
} |
27e1019b529f5fdabfcd85fac1915704174da9b4.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Lee James O'Riordan on 12/6/16.
//
#include "nDcuFFT.h"
#include <stdio.h>
#include <cstdlib>
#include <hipfftXt.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
#include <vector>
//#####################################################################
//#####################################################################
// These are adapted from macros in the CUDA samples and are exempt
// from my licensing choices.
#define ERR_CHECK(err_val) { \
hipError_t err = err_val; \
if (err != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(err), __LINE__, __FILE__); \
exit(1); \
} \
}
#define FFT_ERR_CHECK(err_val) { \
hipfftResult err = err_val; \
if (err != HIPFFT_SUCCESS) { \
fprintf(stderr, "Error %d at line %d in file %s\n", \
err, __LINE__, __FILE__); \
exit(1); \
} \
}
//#####################################################################
//#####################################################################
void ftParamsInit(int numDims, int* dimSize, FTParams *params){
int i = 0; //Loop variable, to be used in two separate loops
int totalElements = 1;
int *prodVals = (int*) malloc((2*numDims)*sizeof(int)); //2N-1 pairings + 1 for unity at index [0]
prodVals[0] = 1; //Begin by declaring [0] as unity, to determine higher indices based on previous
params->numDims = numDims;
//Determine the total number of elements in the dataset
for (i = 0; i < numDims; ++i)
totalElements *= dimSize[i];
//Calculate the products of adjacent data sizes
for (i = 1; i <= (2*numDims-1); ++i) {
if( i <= numDims ){ prodVals[i] = prodVals[i-1] * dimSize[i-1]; }
else { prodVals[i] = ::ceil( prodVals[numDims] / prodVals[i - numDims] ); }
}
/* Populate the parameter struct array with the appropriate values per dimension.
* I should write a blog post on how this works.
* Here's hoping that I remember to do so. Falls out of above though. */
params->dims = dimSize;
params->numElem = totalElements;
for( i = 0; i < numDims; ++i ){
params->numTransforms[i] = (i == 0) ? totalElements/dimSize[i] : prodVals[i];
params->numLoops[i] = (i == 0) ? prodVals[0] : prodVals[(numDims+1 + i)%(2*numDims)]; // Start/End no trans
params->stride[i] = prodVals[i];
params->dist[i] = (i == 0) ? prodVals[1] : prodVals[0]; //element 1 for first entry, element 0 otherwise
params->offset[i] = ( (i == numDims-1) || (i == 0) ) ? 0 : prodVals[i+1]; //0 for first and last entries
std::cout << "PARAMS[" << i << "]\n";
std::cout << params->numTransforms[i]<< "\t"
<< params->numLoops[i] << "\t"
<< params->stride[i] << "\t"
<< params->dist[i] << "\t"
<< params->offset[i] << "\t"
<< params->dims[i] << "\n";
}
}
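// Worked example (illustrative): for numDims = 3 and dimSize = {9, 9, 3},
// totalElements = 243 and prodVals = {1, 9, 81, 243, 27, 3}, giving per
// dimension (numTransforms, numLoops, stride, dist, offset):
// dim 0: (27, 1, 1, 9, 0)
// dim 1: ( 9, 3, 9, 1, 81)
// dim 2: (81, 1, 81, 1, 0)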
/*
 * Copy the input to the device, run a batched 1-D Z2Z FFT along dimension
 * tDim (using the stride/dist/offset layout computed in ftParamsInit), and
 * copy the transformed data back to the host.
 */
void fft_HDH(FTParams *params, int tDim, double2 *dataIn, double2 *dataOut){
hipfftHandle plan;
double2 *data_D;
ERR_CHECK(
hipMalloc((void**) &data_D, sizeof(double2) * params->numElem) );
ERR_CHECK(
hipMemcpy(data_D, dataIn, sizeof(double2) * params->numElem, hipMemcpyHostToDevice) );
FFT_ERR_CHECK(
hipfftPlanMany(&plan, 1,
params->dims, params->dims, params->stride[ tDim ],
params->dist[ tDim ], params->dims, params->stride[ tDim ],
params->dist[ tDim ], HIPFFT_Z2Z, params->numTransforms[ tDim ]
)
);
for ( int i = 0; i < params->numTransforms[ tDim ]; ++i ){
FFT_ERR_CHECK(
hipfftExecZ2Z( plan,
&data_D[ i * params->offset[tDim] ],
&data_D[ i * params->offset[tDim] ],
HIPFFT_FORWARD
)
);
}
ERR_CHECK( hipMemcpy(dataOut, data_D, sizeof(double2) * params->numElem, hipMemcpyDeviceToHost) );
hipFree(data_D);
}
int main(){
int numDims = 2;
int dimSize[] = {9,9,3};
FTParams params;
params.numTransforms = (int*) malloc(sizeof(int)*numDims);
params.numLoops = (int*) malloc(sizeof(int)*numDims);
params.stride = (int*) malloc(sizeof(int)*numDims);
params.dist = (int*) malloc(sizeof(int)*numDims);
params.offset = (int*) malloc(sizeof(int)*numDims);
ftParamsInit(numDims,dimSize, ¶ms);
double2 *dataIn, *dataOut;
dataIn = (double2*) malloc(sizeof(double2) * params.numElem);
dataOut = (double2*) malloc(sizeof(double2) * params.numElem);
// ******************************************************************************** //
// Create the input data
// ******************************************************************************** //
std::cout << "INPUT:\n";
for( int ii=0; ii < params.dims[0]; ++ii ){
std::cout << "C(:,:," << ii+1 << ")=[";
for( int jj=0; jj < params.dims[1]; ++jj ){
for( int kk=0; kk < params.dims[2]; ++kk ){
dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x = (double) ii;
dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y = (double) jj;
std::cout << dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x << " + 1i*" << dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y << "\t";
}
std::cout << "\n";
}
std::cout << "]\n";
}
std::cout << "\n --- \n";
//ERR_CHECK( hipMalloc( (double2**) &dataD, sizeof(double2) * params.numElem) );
//ERR_CHECK( hipMemcpy(dataD, dataIn, sizeof(double2) * params.numElem, hipMemcpyHostToDevice) );
//ERR_CHECK( hipMemcpy(dataOut, dataD, sizeof(double2) * params.numElem, hipMemcpyDeviceToHost) );
// ******************************************************************************** //
// Perform FFT
// ******************************************************************************** //
fft_HDH(¶ms, 1, dataIn, dataOut);
// ******************************************************************************** //
// Show FFT results
// ******************************************************************************** //
std::cout << "OUTPUT:\n";
for( int ii=0; ii < params.dims[0]; ++ii ){
std::cout << "C(:,:," << ii+1 << ")=[";
for( int jj=0; jj < params.dims[1]; ++jj ){
for( int kk=0; kk < params.dims[2]; ++kk ){
                // print the transformed values copied back from the device (do not overwrite them)
                std::cout << dataOut[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x << " + 1i*" << dataOut[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y << "\t";
}
std::cout << "\n";
}
std::cout << "]\n";
}
std::cout << "\n --- \n";
free (dataIn); free (dataOut);
}
| 27e1019b529f5fdabfcd85fac1915704174da9b4.cu | //
// Created by Lee James O'Riordan on 12/6/16.
//
#include "nDcuFFT.h"
#include <stdio.h>
#include <cstdlib>
#include <cufftXt.h>
#include <cuda.h>
#include <iostream>
#include <cmath>
#include <vector>
//#####################################################################
//#####################################################################
// These are adapted from macros in the CUDA samples and are exempt
// from my licensing choices.
#define ERR_CHECK(err_val) { \
cudaError_t err = err_val; \
if (err != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(err), __LINE__, __FILE__); \
exit(1); \
} \
}
#define FFT_ERR_CHECK(err_val) { \
cufftResult err = err_val; \
if (err != CUFFT_SUCCESS) { \
fprintf(stderr, "Error %d at line %d in file %s\n", \
err, __LINE__, __FILE__); \
exit(1); \
} \
}
//#####################################################################
//#####################################################################
void ftParamsInit(int numDims, int* dimSize, FTParams *params){
int i = 0; //Loop variable, to be used in two separate loops
int totalElements = 1;
int *prodVals = (int*) malloc((2*numDims)*sizeof(int)); //2N-1 pairings + 1 for unity at index [0]
prodVals[0] = 1; //Begin by declaring [0] as unity, to determine higher indices based on previous
params->numDims = numDims;
//Determine maximum number of elements in dataset
for (i = 0; i < numDims; ++i)
totalElements *= dimSize[i];
//Calculate the products of adjacent data sizes
for (i = 1; i <= (2*numDims-1); ++i) {
if( i <= numDims ){ prodVals[i] = prodVals[i-1] * dimSize[i-1]; }
else { prodVals[i] = std::ceil( prodVals[numDims] / prodVals[i - numDims] ); }
}
/* Populate the parameter struct array with the appropriate values per dimension.
* I should write a blog post on how this works.
* Here's hoping that I remember to do so. Falls out of above though. */
params->dims = dimSize;
params->numElem = totalElements;
for( i = 0; i < numDims; ++i ){
params->numTransforms[i] = (i == 0) ? totalElements/dimSize[i] : prodVals[i];
params->numLoops[i] = (i == 0) ? prodVals[0] : prodVals[(numDims+1 + i)%(2*numDims)]; // Start/End no trans
params->stride[i] = prodVals[i];
params->dist[i] = (i == 0) ? prodVals[1] : prodVals[0]; //element 1 for first entry, element 0 otherwise
params->offset[i] = ( (i == numDims-1) || (i == 0) ) ? 0 : prodVals[i+1]; //0 for first and last entries
std::cout << "PARAMS[" << i << "]\n";
std::cout << params->numTransforms[i]<< "\t"
<< params->numLoops[i] << "\t"
<< params->stride[i] << "\t"
<< params->dist[i] << "\t"
<< params->offset[i] << "\t"
<< params->dims[i] << "\n";
}
}
/*
* Here we do the magical transformation
*/
void fft_HDH(FTParams *params, int tDim, double2 *dataIn, double2 *dataOut){
cufftHandle plan;
double2 *data_D;
ERR_CHECK(
cudaMalloc((void**) &data_D, sizeof(double2) * params->numElem) );
ERR_CHECK(
cudaMemcpy(data_D, dataIn, sizeof(double2) * params->numElem, cudaMemcpyHostToDevice) );
FFT_ERR_CHECK(
cufftPlanMany(&plan, 1,
params->dims, params->dims, params->stride[ tDim ],
params->dist[ tDim ], params->dims, params->stride[ tDim ],
params->dist[ tDim ], CUFFT_Z2Z, params->numTransforms[ tDim ]
)
);
for ( int i = 0; i < params->numTransforms[ tDim ]; ++i ){
FFT_ERR_CHECK(
cufftExecZ2Z( plan,
&data_D[ i * params->offset[tDim] ],
&data_D[ i * params->offset[tDim] ],
CUFFT_FORWARD
)
);
}
    ERR_CHECK( cudaMemcpy(dataOut, data_D, sizeof(double2) * params->numElem, cudaMemcpyDeviceToHost) );
    cufftDestroy(plan); // release the FFT plan created above
    cudaFree(data_D);
}
int main(){
int numDims = 2;
int dimSize[] = {9,9,3};
FTParams params;
params.numTransforms = (int*) malloc(sizeof(int)*numDims);
params.numLoops = (int*) malloc(sizeof(int)*numDims);
params.stride = (int*) malloc(sizeof(int)*numDims);
params.dist = (int*) malloc(sizeof(int)*numDims);
params.offset = (int*) malloc(sizeof(int)*numDims);
ftParamsInit(numDims,dimSize, ¶ms);
double2 *dataIn, *dataOut;
dataIn = (double2*) malloc(sizeof(double2) * params.numElem);
dataOut = (double2*) malloc(sizeof(double2) * params.numElem);
// ******************************************************************************** //
// Create the input data
// ******************************************************************************** //
std::cout << "INPUT:\n";
for( int ii=0; ii < params.dims[0]; ++ii ){
std::cout << "C(:,:," << ii+1 << ")=[";
for( int jj=0; jj < params.dims[1]; ++jj ){
for( int kk=0; kk < params.dims[2]; ++kk ){
dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x = (double) ii;
dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y = (double) jj;
std::cout << dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x << " + 1i*" << dataIn[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y << "\t";
}
std::cout << "\n";
}
std::cout << "]\n";
}
std::cout << "\n --- \n";
//ERR_CHECK( cudaMalloc( (double2**) &dataD, sizeof(double2) * params.numElem) );
//ERR_CHECK( cudaMemcpy(dataD, dataIn, sizeof(double2) * params.numElem, cudaMemcpyHostToDevice) );
//ERR_CHECK( cudaMemcpy(dataOut, dataD, sizeof(double2) * params.numElem, cudaMemcpyDeviceToHost) );
// ******************************************************************************** //
// Perform FFT
// ******************************************************************************** //
fft_HDH(¶ms, 1, dataIn, dataOut);
// ******************************************************************************** //
// Show FFT results
// ******************************************************************************** //
std::cout << "OUTPUT:\n";
for( int ii=0; ii < params.dims[0]; ++ii ){
std::cout << "C(:,:," << ii+1 << ")=[";
for( int jj=0; jj < params.dims[1]; ++jj ){
for( int kk=0; kk < params.dims[2]; ++kk ){
                // print the transformed values copied back from the device (do not overwrite them)
                std::cout << dataOut[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].x << " + 1i*" << dataOut[ kk + params.dims[1] * ( jj + ii * params.dims[0] ) ].y << "\t";
}
std::cout << "\n";
}
std::cout << "]\n";
}
std::cout << "\n --- \n";
free (dataIn); free (dataOut);
}
|
866357adbfa08f78a10b2fbab0eb6dc97b617ec6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_fp16.h"
#include <cstdint>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
__global__ void wgmma_test1(float *gm_cd, __half *a_desc, __half *b_desc) {
float d_array[4];
for (int i = 0; i < 4; ++i) {
d_array[i] = gm_cd[i];
}
printf("hello");
asm volatile("{\n\t"
".reg .b64 a_d, b_d;\n\t"
"ld.param.u64 %rd1, [_Z11wgmma_test1PfP6__halfS1__param_1];\n\t"
"ld.param.u64 %rd2, [_Z11wgmma_test1PfP6__halfS1__param_2];\n\t"
"ld.param.u64 %rd3, [_Z11wgmma_test1PfP6__halfS1__param_0];\n\t"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16\n\t"
"{%0, %1, %2, %3}, %4, %5,1,1,1,0,0;\n\t"
"}\n\t"
: "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]),
"+f"(d_array[3])
: "l"(a_desc), "l"(b_desc)
:);
for (int i = 0; i < 4; ++i) {
gm_cd[i] = d_array[i];
}
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
//float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
//float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(__half) * size);
hipMalloc((void**)(&device_b), sizeof(__half) * size);
//hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
//InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x7000; host_a[0]=fp16.f;
fp16.i = 0x0c00; host_a[1]=fp16.f;
fp16.i = 0xffff; host_a[2]=fp16.f;
fp16.i = 0xffff; host_a[3]=fp16.f;
fp16.i = 0xffff; host_a[4]=fp16.f;
fp16.i = 0xffff; host_a[5]=fp16.f;
fp16.i = 0xffff; host_a[6]=fp16.f;
fp16.i = 0xffff; host_a[7]=fp16.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, hipMemcpyHostToDevice);
//hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( wgmma_test1), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
| 866357adbfa08f78a10b2fbab0eb6dc97b617ec6.cu | #include "cuda_fp16.h"
#include <cstdint>
#include <iostream>
#include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
__global__ void wgmma_test1(float *gm_cd, __half *a_desc, __half *b_desc) {
float d_array[4];
for (int i = 0; i < 4; ++i) {
d_array[i] = gm_cd[i];
}
printf("hello");
asm volatile("{\n\t"
".reg .b64 a_d, b_d;\n\t"
"ld.param.u64 %rd1, [_Z11wgmma_test1PfP6__halfS1__param_1];\n\t"
"ld.param.u64 %rd2, [_Z11wgmma_test1PfP6__halfS1__param_2];\n\t"
"ld.param.u64 %rd3, [_Z11wgmma_test1PfP6__halfS1__param_0];\n\t"
"wgmma.mma_async.sync.aligned.m64n8k16.f32.f16.f16\n\t"
"{%0, %1, %2, %3}, %4, %5,1,1,1,0,0;\n\t"
"}\n\t"
: "+f"(d_array[0]), "+f"(d_array[1]), "+f"(d_array[2]),
"+f"(d_array[3])
: "l"(a_desc), "l"(b_desc)
:);
for (int i = 0; i < 4; ++i) {
gm_cd[i] = d_array[i];
}
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
//float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
//float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(__half) * size);
cudaMalloc((void**)(&device_b), sizeof(__half) * size);
//cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
//InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x7000; host_a[0]=fp16.f;
fp16.i = 0x0c00; host_a[1]=fp16.f;
fp16.i = 0xffff; host_a[2]=fp16.f;
fp16.i = 0xffff; host_a[3]=fp16.f;
fp16.i = 0xffff; host_a[4]=fp16.f;
fp16.i = 0xffff; host_a[5]=fp16.f;
fp16.i = 0xffff; host_a[6]=fp16.f;
fp16.i = 0xffff; host_a[7]=fp16.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, cudaMemcpyHostToDevice);
//cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
wgmma_test1<<<1,32>>>(device_d, device_a, device_b);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
bddd6cfd631dbd5437558ce4347de3afae88a14d.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define GRAPH_SIZE 1999
#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)
#define INF 0x1fffffff
void generate_random_graph(int *output, int graph_size) {
int i, j;
int counter = 0;
srand(0xdadadada);
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (i == j) {
D(i, j) = 0;
} else {
int r;
r = rand() % 40;
if (r > 20) {
//r = INF;
}
D(i, j) = r;
if(r == 0){
counter++;
D(i, j) = 1;
}
}
}
}
printf("counter:%d\n", counter);
}
int gcd(int a, int b) {
if (b == 0) {
return a;
}
return gcd(b, a % b);
}
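// One Floyd-Warshall relaxation step for a fixed intermediate vertex k: each thread updates
// a single pair (i, j) with the shorter of D(i, j) and D(i, k) + D(k, j).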
__global__ void gpu_calculate(int k, int graph_size, int *output, int threads) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < GRAPH_SIZE && j < GRAPH_SIZE){
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
int threads = gcd(GRAPH_SIZE,32);
int blocks;
if(threads == 1){
threads = 16;//ceil(aux);
double aux = (double)GRAPH_SIZE / (double)threads;
blocks = ceil(aux);
} else {
blocks = GRAPH_SIZE / threads;
}
//threads = 16;
printf("threads per block %d x %d\n",threads,threads);
printf("numBlocks %d x %d\n",blocks,blocks);
printf("total threads %d\n",blocks*blocks*threads*threads);
printf("total matrix entries %d\n",GRAPH_SIZE*GRAPH_SIZE);
dim3 threadsPerBlock(threads, threads);
dim3 numBlocks(blocks, blocks);
//dim3 numBlocks(GRAPH_SIZE / threadsPerBlock.x, GRAPH_SIZE / threadsPerBlock.y);
int *dev;
int size = sizeof(int) * graph_size * graph_size;
hipMalloc(&dev, size);
hipMemcpy(dev, graph, size, hipMemcpyHostToDevice);
for (int k = 0; k < graph_size; k++) {
hipLaunchKernelGGL(( gpu_calculate), dim3(numBlocks), dim3(threadsPerBlock), sizeof(int) * threads * 2, 0, k, graph_size, dev, threads);
}
hipMemcpy(output, dev, size, hipMemcpyDeviceToHost);
hipFree(dev);
}
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
int i, j, k;
memcpy(output, graph, sizeof(int) * graph_size * graph_size);
for (k = 0; k < graph_size; k++) {
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
}
}
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int *graph, *output_cpu, *output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int *)malloc(size);
assert(graph);
output_cpu = (int *)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int *)malloc(size);
assert(output_gpu);
generate_random_graph(graph, GRAPH_SIZE);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
} else {
/*for (int k = 500; k < 550; k++) {
printf("cpu:%d gpu:%d origin:%d\n", output_cpu[k], output_gpu[k], graph[k]);
}*/
printf("OK\n");
}
return 0;
}
| bddd6cfd631dbd5437558ce4347de3afae88a14d.cu | #include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define GRAPH_SIZE 1999
#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)
#define INF 0x1fffffff
void generate_random_graph(int *output, int graph_size) {
int i, j;
int counter = 0;
srand(0xdadadada);
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (i == j) {
D(i, j) = 0;
} else {
int r;
r = rand() % 40;
if (r > 20) {
//r = INF;
}
D(i, j) = r;
if(r == 0){
counter++;
D(i, j) = 1;
}
}
}
}
printf("counter:%d\n", counter);
}
int gcd(int a, int b) {
if (b == 0) {
return a;
}
return gcd(b, a % b);
}
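// One Floyd-Warshall relaxation step for a fixed intermediate vertex k: each thread updates
// a single pair (i, j) with the shorter of D(i, j) and D(i, k) + D(k, j).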
__global__ void gpu_calculate(int k, int graph_size, int *output, int threads) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < GRAPH_SIZE && j < GRAPH_SIZE){
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
int threads = gcd(GRAPH_SIZE,32);
int blocks;
if(threads == 1){
threads = 16;//ceil(aux);
double aux = (double)GRAPH_SIZE / (double)threads;
blocks = ceil(aux);
} else {
blocks = GRAPH_SIZE / threads;
}
//threads = 16;
printf("threads per block %d x %d\n",threads,threads);
printf("numBlocks %d x %d\n",blocks,blocks);
printf("total threads %d\n",blocks*blocks*threads*threads);
printf("total matrix entries %d\n",GRAPH_SIZE*GRAPH_SIZE);
dim3 threadsPerBlock(threads, threads);
dim3 numBlocks(blocks, blocks);
//dim3 numBlocks(GRAPH_SIZE / threadsPerBlock.x, GRAPH_SIZE / threadsPerBlock.y);
int *dev;
int size = sizeof(int) * graph_size * graph_size;
cudaMalloc(&dev, size);
cudaMemcpy(dev, graph, size, cudaMemcpyHostToDevice);
for (int k = 0; k < graph_size; k++) {
gpu_calculate<<<numBlocks, threadsPerBlock, sizeof(int) * threads * 2>>>(k, graph_size, dev, threads);
}
cudaMemcpy(output, dev, size, cudaMemcpyDeviceToHost);
cudaFree(dev);
}
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
int i, j, k;
memcpy(output, graph, sizeof(int) * graph_size * graph_size);
for (k = 0; k < graph_size; k++) {
for (i = 0; i < graph_size; i++) {
for (j = 0; j < graph_size; j++) {
if (D(i, k) + D(k, j) < D(i, j)) {
D(i, j) = D(i, k) + D(k, j);
}
}
}
}
}
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int *graph, *output_cpu, *output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int *)malloc(size);
assert(graph);
output_cpu = (int *)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int *)malloc(size);
assert(output_gpu);
generate_random_graph(graph, GRAPH_SIZE);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
} else {
/*for (int k = 500; k < 550; k++) {
printf("cpu:%d gpu:%d origin:%d\n", output_cpu[k], output_gpu[k], graph[k]);
}*/
printf("OK\n");
}
return 0;
}
|
c5a576647676179f72ce399292ff77ae20736af8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
// 2D float texture
texture<uchar4, 2, hipReadModeElementType> texRef;
// Simple scaling kernel
__global__ void scaleKernel(uchar4* output, int width, int height, int scale, int newW, int newH)
{
    // Calculate the (unnormalized) texel coordinates of the top-left corner of this output pixel's scale x scale input block
unsigned int x = threadIdx.x * scale;
unsigned int y = blockIdx.y * scale;
float4 result = { 0.0, 0.0, 0.0, 0.0 };
uchar4 tmp;
for (unsigned int i=0;i<scale;i++) {
for (unsigned int j=0;j<scale;j++) {
tmp = tex2D(texRef, x+j, y+i);
result.x += tmp.x;
result.y += tmp.y;
result.z += tmp.z;
result.w += tmp.w;
}
}
float sqr = scale*scale;
tmp.x = result.x / sqr;
tmp.y = result.y / sqr;
tmp.z = result.z / sqr;
tmp.w = result.w / sqr;
output[blockIdx.y*newW + threadIdx.x] = tmp;
}
// Simple scaling kernel
__global__ void scaleKernel16(uchar4* output, int width, int height)
{
// TODO
}
extern "C" {
int cudaScale(unsigned char* in, int w, int h, int scale, unsigned char *out)
{
// Describe the texture as 4 unsigned bytes per element
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
// Copy the input data into the device
hipArray* inArray;
hipMallocArray(&inArray, &channelDesc, w, h);
hipMemcpyToArray(inArray, 0, 0, in, w*h*4, hipMemcpyHostToDevice);
// Set texture parameters (clamp the coordinates, return single points, and do not normalize)
texRef.addressMode[0] = hipAddressModeClamp;
texRef.addressMode[1] = hipAddressModeClamp;
texRef.filterMode = hipFilterModePoint;
texRef.normalized = false;
// Bind the array to the texture
hipBindTextureToArray(texRef, inArray, channelDesc);
int newW = w/scale;
int newH = h/scale;
// Allocate result output array in device memory
uchar4* output;
hipMalloc((void**)&output, newW * newH * 4);
// Invoke kernel
dim3 dimBlock(newW, 1);
dim3 dimGrid(1, newH);
hipLaunchKernelGGL(( scaleKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, output, w, h, scale, newW, newH);
hipMemcpy(out, output, newW*newH*4, hipMemcpyDeviceToHost);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
// Free device memory
hipFreeArray(inArray);
hipFree(output);
return 0;
}
}
| c5a576647676179f72ce399292ff77ae20736af8.cu | #include <cutil_inline.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
// 2D float texture
texture<uchar4, 2, cudaReadModeElementType> texRef;
// Simple scaling kernel
__global__ void scaleKernel(uchar4* output, int width, int height, int scale, int newW, int newH)
{
    // Calculate the (unnormalized) texel coordinates of the top-left corner of this output pixel's scale x scale input block
unsigned int x = threadIdx.x * scale;
unsigned int y = blockIdx.y * scale;
float4 result = { 0.0, 0.0, 0.0, 0.0 };
uchar4 tmp;
for (unsigned int i=0;i<scale;i++) {
for (unsigned int j=0;j<scale;j++) {
tmp = tex2D(texRef, x+j, y+i);
result.x += tmp.x;
result.y += tmp.y;
result.z += tmp.z;
result.w += tmp.w;
}
}
float sqr = scale*scale;
tmp.x = result.x / sqr;
tmp.y = result.y / sqr;
tmp.z = result.z / sqr;
tmp.w = result.w / sqr;
output[blockIdx.y*newW + threadIdx.x] = tmp;
}
// Simple scaling kernel
__global__ void scaleKernel16(uchar4* output, int width, int height)
{
// TODO
}
extern "C" {
int cudaScale(unsigned char* in, int w, int h, int scale, unsigned char *out)
{
// Describe the texture as 4 unsigned bytes per element
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
// Copy the input data into the device
cudaArray* inArray;
cudaMallocArray(&inArray, &channelDesc, w, h);
cudaMemcpyToArray(inArray, 0, 0, in, w*h*4, cudaMemcpyHostToDevice);
// Set texture parameters (clamp the coordinates, return single points, and do not normalize)
texRef.addressMode[0] = cudaAddressModeClamp;
texRef.addressMode[1] = cudaAddressModeClamp;
texRef.filterMode = cudaFilterModePoint;
texRef.normalized = false;
// Bind the array to the texture
cudaBindTextureToArray(texRef, inArray, channelDesc);
int newW = w/scale;
int newH = h/scale;
// Allocate result output array in device memory
uchar4* output;
cudaMalloc((void**)&output, newW * newH * 4);
// Invoke kernel
dim3 dimBlock(newW, 1);
dim3 dimGrid(1, newH);
scaleKernel<<<dimGrid, dimBlock>>>(output, w, h, scale, newW, newH);
cudaMemcpy(out, output, newW*newH*4, cudaMemcpyDeviceToHost);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
// Free device memory
cudaFreeArray(inArray);
cudaFree(output);
return 0;
}
}
|
b14da10f8d31ce3d91688bce07180604f561f6bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <algorithm>
#include <chrono>
#include <thrust/sort.h>
#include "fingerprint_structure.hpp"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
const int BLOCKSIZE = 36;
// Constant weights
const float w1 = 0.16f;
const float w2 = 0.37f;
const float w3 = 0.16f;
const float w4 = 0.31f;
__host__ __device__ unsigned char dperiod_to_byte(float period) {
float fresult = period/period_unit;
unsigned char result = (char)fresult;
return result;
}
__host__ __device__ float dbyte_to_period(unsigned char c) {
float result = period_unit*(int)c;
return result;
}
__host__ __device__ unsigned char dfrequency_to_byte(float frequency) {
if (frequency == 0) {
return dperiod_to_byte(frequency);
} else {
return dperiod_to_byte(1.0f/frequency);
}
}
__host__ __device__ float dbyte_to_frequency(unsigned char c) {
float result = dbyte_to_period(c);
if (result == 0) return result;
else return 1/result;
}
__device__ float dbyte_to_coherence(unsigned char c) {
float result = (float)c/coherence_unit;
return result;
}
__device__ float dbyte_to_orientation(unsigned char c) {
float result = orientation_unit*(int)c;
return result;
}
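// S1: orientation similarity between the query and one database core, computed as a
// coherence-weighted vector average of the doubled local orientation differences
// (one thread per local block, BLOCKSIZE blocks per core).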
__global__ void calculate_s1(fingerprint* db, fingerprint* fp, float* result) {
__shared__ float ss, scos, ssin;
int j = blockIdx.x;
int i = threadIdx.x;
if (i == 0) {
ss = 0;
scos = 0;
ssin = 0;
}
__syncthreads();
float s = dbyte_to_coherence(fp->local_coherence[i])*dbyte_to_coherence((db+j)->local_coherence[i]);
float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[i])-dbyte_to_orientation((db+j)->local_orientation[i]));
float tcos = s*cos(d);
float tsin = s*sin(d);
atomicAdd(&ss, s);
atomicAdd(&scos, tcos);
atomicAdd(&ssin, tsin);
__syncthreads();
if (i == 0) {
if (ss != 0) result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss;
else result[j] = 0;
}
}
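// Each fingerprint stores up to 5 cores with consecutive ids (id%5 == 1 marks the first one);
// keep the core with the highest S1 score and record its index in 'mapping' so the remaining
// similarity terms are evaluated once per fingerprint instead of once per core.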
__global__ void get_best_core_s1(fingerprint* db, float* result, int* mapping) {
int i = blockIdx.x;
if ((db+i)->id%5 == 1) {
int max_idx = i;
for (int j=1 ; j<5 ; j++) {
if ((db+i+j)->id%5 == 1) break;
else {
if (result[i+j] > result[max_idx]) {
max_idx = i+j;
}
}
}
mapping[((db+i)->id-1)/5] = max_idx;
}
}
__global__ void calculate_s2(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
__shared__ float s_addition, s_absdiff;
int j = mapping[blockIdx.x];
int i = threadIdx.x;
    if (i == 0) {
        s_addition = 0.0f;
        s_absdiff = 0.0f;
    }
    __syncthreads(); // make sure the shared accumulators are zeroed before any thread adds to them
float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]);
float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i]));
atomicAdd(&s_addition, t_addition);
atomicAdd(&s_absdiff, t_absdiff);
__syncthreads();
if (i == 0) {
result[blockIdx.x] = 1 - (s_absdiff/s_addition);
}
}
__global__ void calculate_s3(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
int j = mapping[blockIdx.x];
result[blockIdx.x] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency)));
}
__global__ void calculate_s4(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
int j = mapping[blockIdx.x];
result[blockIdx.x] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f);
}
__global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) {
int i = blockIdx.x;
result[i] = w1*s1[mapping[i]] + w2*s2[i] + w3*s3[i] + w4*s4[i];
}
__global__ void get_top_fingerprints(float* s, float* result, int* mapping) {
int i = threadIdx.x;
result[i] = s[mapping[i]];
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n";
return 0;
}
std::string fp_filename = argv[1];
std::string db_filename = argv[2];
// Read the fingerprint to be searched
std::vector<struct fingerprint> fp;
int count_fp = read_from_file(fp, fp_filename);
// Read the database
std::vector<struct fingerprint> db;
int count_db = read_from_file(db, db_filename);
std::cerr << "Fingerprint core database count : " << count_db << std::endl;
std::cerr << "Last fingerprint ID : " << db[count_db-1].id << std::endl;
int count_db_fingerprint = (db[count_db-1].id-1)/5+1;
std::cerr << "Fingerprint database count : " << count_db_fingerprint << std::endl;
auto timer_start = std::chrono::steady_clock::now();
// Preparing memory
fingerprint *d_fp, *d_db;
std::vector<float> result(count_db_fingerprint, 0);
float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result, *d_result;
hipMalloc((void **)&d_fp, sizeof(fingerprint));
hipMalloc((void **)&d_db, count_db*sizeof(fingerprint));
hipMalloc((void **)&d_s1_result, count_db*sizeof(float));
hipMalloc((void **)&d_s2_result, count_db_fingerprint*sizeof(float));
hipMalloc((void **)&d_s3_result, count_db_fingerprint*sizeof(float));
hipMalloc((void **)&d_s4_result, count_db_fingerprint*sizeof(float));
hipMalloc((void **)&d_result, count_db_fingerprint*sizeof(float));
//Mapping for fingerprint to fingerprint core idx
int *d_mapping;
hipMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int));
hipMemcpy(d_db, &db[0], count_db*sizeof(fingerprint), hipMemcpyHostToDevice);
hipMemcpy(d_fp, &fp[0], sizeof(fingerprint), hipMemcpyHostToDevice);
// S1
hipLaunchKernelGGL(( calculate_s1), dim3(count_db),dim3(BLOCKSIZE), 0, 0, d_db, d_fp, d_s1_result);
hipLaunchKernelGGL(( get_best_core_s1), dim3(count_db), dim3(1), 0, 0, d_db, d_s1_result, d_mapping);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
std::vector<int> mapping(count_db_fingerprint, 0);
hipMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), hipMemcpyDeviceToHost);
// std::vector<float> s1_result;
// s1_result.resize(count_db, 0);
// hipMemcpy(&s1_result[0], d_s1_result, count_db*sizeof(float), hipMemcpyDeviceToHost);
// S2 until S4 using stream
hipStream_t streams[3];
hipStreamCreate(&streams[0]);
hipStreamCreate(&streams[1]);
hipStreamCreate(&streams[2]);
// Only calculate for 1 core per fingerprint using mapping
hipLaunchKernelGGL(( calculate_s2), dim3(count_db_fingerprint),dim3(BLOCKSIZE), 8, streams[0], d_db, d_fp, d_s2_result, d_mapping);
// hipMemcpy(&s2_result[0], d_s2_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost);
// S3
hipLaunchKernelGGL(( calculate_s3), dim3(count_db_fingerprint),dim3(1), 0, streams[1], d_db, d_fp, d_s3_result,d_mapping);
// hipMemcpy(&s3_result[0], d_s3_result, count_db*sizeof(float), hipMemcpyDeviceToHost);
// S4
hipLaunchKernelGGL(( calculate_s4), dim3(count_db_fingerprint),dim3(1), 0, streams[2], d_db, d_fp, d_s4_result, d_mapping);
// hipMemcpy(&s4_result[0], d_s4_result, count_db*sizeof(float), hipMemcpyDeviceToHost);
// S
hipLaunchKernelGGL(( calculate_s), dim3(count_db_fingerprint), dim3(1), 0, 0, d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping);
// hipMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost);
// ID for identifying fingerprint during sort
int* ids = new int[count_db_fingerprint];
for (int i=0 ; i<count_db_fingerprint ; i++) {
ids[i] = db[mapping[i]].id;
}
int* d_ids;
hipMalloc((void **)&d_ids, count_db_fingerprint*sizeof(int));
hipMemcpy(d_ids, &ids[0], count_db_fingerprint*sizeof(int), hipMemcpyHostToDevice);
auto sort_start = std::chrono::steady_clock::now();
thrust::sort_by_key(thrust::device, d_result, d_result+count_db_fingerprint, d_ids);
auto sort_end = std::chrono::steady_clock::now();
hipMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&ids[0], d_ids, count_db_fingerprint*sizeof(int), hipMemcpyDeviceToHost);
/*for (int i=count_db_fingerprint-1 ; i>=0 ; i--) {
std::cout << "ID " << ids[i] << "-"<< ids[i]/5 <<"\t: " << result[i];
std::cout << std::endl;
}*/
auto timer_end = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = timer_end - timer_start;
std::chrono::duration<double> sort_time = sort_end - sort_start;
std::cerr << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << std::endl;
std::cerr << "Time for sorting " << sort_time.count() << std::endl;
hipFree(d_fp);
hipFree(d_db);
hipFree(d_result);
hipFree(d_mapping);
hipFree(d_s1_result);
hipFree(d_s2_result);
hipFree(d_s3_result);
hipFree(d_s4_result);
hipFree(d_ids);
return 0;
}
// nvcc -o parallel_indexing_stream parallel_indexing_stream.cu fingerprint_structure.cpp -std=c++11 -lineinfo
| b14da10f8d31ce3d91688bce07180604f561f6bc.cu | #include <stdio.h>
#include <iostream>
#include <algorithm>
#include <chrono>
#include <thrust/sort.h>
#include "fingerprint_structure.hpp"
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
const int BLOCKSIZE = 36;
// Constant weights
const float w1 = 0.16f;
const float w2 = 0.37f;
const float w3 = 0.16f;
const float w4 = 0.31f;
__host__ __device__ unsigned char dperiod_to_byte(float period) {
float fresult = period/period_unit;
unsigned char result = (char)fresult;
return result;
}
__host__ __device__ float dbyte_to_period(unsigned char c) {
float result = period_unit*(int)c;
return result;
}
__host__ __device__ unsigned char dfrequency_to_byte(float frequency) {
if (frequency == 0) {
return dperiod_to_byte(frequency);
} else {
return dperiod_to_byte(1.0f/frequency);
}
}
__host__ __device__ float dbyte_to_frequency(unsigned char c) {
float result = dbyte_to_period(c);
if (result == 0) return result;
else return 1/result;
}
__device__ float dbyte_to_coherence(unsigned char c) {
float result = (float)c/coherence_unit;
return result;
}
__device__ float dbyte_to_orientation(unsigned char c) {
float result = orientation_unit*(int)c;
return result;
}
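// S1: orientation similarity between the query and one database core, computed as a
// coherence-weighted vector average of the doubled local orientation differences
// (one thread per local block, BLOCKSIZE blocks per core).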
__global__ void calculate_s1(fingerprint* db, fingerprint* fp, float* result) {
__shared__ float ss, scos, ssin;
int j = blockIdx.x;
int i = threadIdx.x;
if (i == 0) {
ss = 0;
scos = 0;
ssin = 0;
}
__syncthreads();
float s = dbyte_to_coherence(fp->local_coherence[i])*dbyte_to_coherence((db+j)->local_coherence[i]);
float d = M_PI/180.0f * 2 * (dbyte_to_orientation(fp->local_orientation[i])-dbyte_to_orientation((db+j)->local_orientation[i]));
float tcos = s*cos(d);
float tsin = s*sin(d);
atomicAdd(&ss, s);
atomicAdd(&scos, tcos);
atomicAdd(&ssin, tsin);
__syncthreads();
if (i == 0) {
if (ss != 0) result[j] = sqrt(pow(scos,2)+pow(ssin,2))/ss;
else result[j] = 0;
}
}
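// Each fingerprint stores up to 5 cores with consecutive ids (id%5 == 1 marks the first one);
// keep the core with the highest S1 score and record its index in 'mapping' so the remaining
// similarity terms are evaluated once per fingerprint instead of once per core.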
__global__ void get_best_core_s1(fingerprint* db, float* result, int* mapping) {
int i = blockIdx.x;
if ((db+i)->id%5 == 1) {
int max_idx = i;
for (int j=1 ; j<5 ; j++) {
if ((db+i+j)->id%5 == 1) break;
else {
if (result[i+j] > result[max_idx]) {
max_idx = i+j;
}
}
}
mapping[((db+i)->id-1)/5] = max_idx;
}
}
__global__ void calculate_s2(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
__shared__ float s_addition, s_absdiff;
int j = mapping[blockIdx.x];
int i = threadIdx.x;
    if (i == 0) {
        s_addition = 0.0f;
        s_absdiff = 0.0f;
    }
    __syncthreads(); // make sure the shared accumulators are zeroed before any thread adds to them
float t_addition = dbyte_to_frequency(fp->local_frequency[i]) + dbyte_to_frequency((db+j)->local_frequency[i]);
float t_absdiff = abs(dbyte_to_frequency(fp->local_frequency[i]) - dbyte_to_frequency((db+j)->local_frequency[i]));
atomicAdd(&s_addition, t_addition);
atomicAdd(&s_absdiff, t_absdiff);
__syncthreads();
if (i == 0) {
result[blockIdx.x] = 1 - (s_absdiff/s_addition);
}
}
__global__ void calculate_s3(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
int j = mapping[blockIdx.x];
result[blockIdx.x] = 1 - (abs(dbyte_to_frequency(fp->avg_frequency)-dbyte_to_frequency((db+j)->avg_frequency))/max(dbyte_to_frequency(fp->avg_frequency), dbyte_to_frequency((db+j)->avg_frequency)));
}
__global__ void calculate_s4(fingerprint* db, fingerprint* fp, float* result, int* mapping) {
int j = mapping[blockIdx.x];
result[blockIdx.x] = 1-(abs(dbyte_to_orientation(fp->avg_orientation)-dbyte_to_orientation((db+j)->avg_orientation))/180.0f);
}
__global__ void calculate_s(float* s1, float* s2, float*s3, float* s4, float* result, int* mapping) {
int i = blockIdx.x;
result[i] = w1*s1[mapping[i]] + w2*s2[i] + w3*s3[i] + w4*s4[i];
}
__global__ void get_top_fingerprints(float* s, float* result, int* mapping) {
int i = threadIdx.x;
result[i] = s[mapping[i]];
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cerr << "Usage : ./parallel_indexing fingerprint-to-be-searched fingerprint-db\n";
return 0;
}
std::string fp_filename = argv[1];
std::string db_filename = argv[2];
// Read the fingerprint to be searched
std::vector<struct fingerprint> fp;
int count_fp = read_from_file(fp, fp_filename);
// Read the database
std::vector<struct fingerprint> db;
int count_db = read_from_file(db, db_filename);
std::cerr << "Fingerprint core database count : " << count_db << std::endl;
std::cerr << "Last fingerprint ID : " << db[count_db-1].id << std::endl;
int count_db_fingerprint = (db[count_db-1].id-1)/5+1;
std::cerr << "Fingerprint database count : " << count_db_fingerprint << std::endl;
auto timer_start = std::chrono::steady_clock::now();
// Preparing memory
fingerprint *d_fp, *d_db;
std::vector<float> result(count_db_fingerprint, 0);
float *d_s1_result, *d_s2_result, *d_s3_result, *d_s4_result, *d_result;
cudaMalloc((void **)&d_fp, sizeof(fingerprint));
cudaMalloc((void **)&d_db, count_db*sizeof(fingerprint));
cudaMalloc((void **)&d_s1_result, count_db*sizeof(float));
cudaMalloc((void **)&d_s2_result, count_db_fingerprint*sizeof(float));
cudaMalloc((void **)&d_s3_result, count_db_fingerprint*sizeof(float));
cudaMalloc((void **)&d_s4_result, count_db_fingerprint*sizeof(float));
cudaMalloc((void **)&d_result, count_db_fingerprint*sizeof(float));
//Mapping for fingerprint to fingerprint core idx
int *d_mapping;
cudaMalloc((void **)&d_mapping, count_db_fingerprint*sizeof(int));
cudaMemcpy(d_db, &db[0], count_db*sizeof(fingerprint), cudaMemcpyHostToDevice);
cudaMemcpy(d_fp, &fp[0], sizeof(fingerprint), cudaMemcpyHostToDevice);
// S1
calculate_s1<<<count_db,BLOCKSIZE>>>(d_db, d_fp, d_s1_result);
get_best_core_s1<<<count_db, 1>>>(d_db, d_s1_result, d_mapping);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
std::vector<int> mapping(count_db_fingerprint, 0);
cudaMemcpy(&mapping[0], d_mapping, count_db_fingerprint*sizeof(int), cudaMemcpyDeviceToHost);
// std::vector<float> s1_result;
// s1_result.resize(count_db, 0);
// cudaMemcpy(&s1_result[0], d_s1_result, count_db*sizeof(float), cudaMemcpyDeviceToHost);
// S2 until S4 using stream
cudaStream_t streams[3];
cudaStreamCreate(&streams[0]);
cudaStreamCreate(&streams[1]);
cudaStreamCreate(&streams[2]);
// Only calculate for 1 core per fingerprint using mapping
calculate_s2<<<count_db_fingerprint,BLOCKSIZE, 8, streams[0]>>>(d_db, d_fp, d_s2_result, d_mapping);
// cudaMemcpy(&s2_result[0], d_s2_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost);
// S3
calculate_s3<<<count_db_fingerprint,1, 0, streams[1]>>>(d_db, d_fp, d_s3_result,d_mapping);
// cudaMemcpy(&s3_result[0], d_s3_result, count_db*sizeof(float), cudaMemcpyDeviceToHost);
// S4
calculate_s4<<<count_db_fingerprint,1, 0, streams[2]>>>(d_db, d_fp, d_s4_result, d_mapping);
// cudaMemcpy(&s4_result[0], d_s4_result, count_db*sizeof(float), cudaMemcpyDeviceToHost);
// S
calculate_s<<<count_db_fingerprint, 1>>>(d_s1_result, d_s2_result, d_s3_result, d_s4_result, d_result, d_mapping);
// cudaMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost);
// ID for identifying fingerprint during sort
int* ids = new int[count_db_fingerprint];
for (int i=0 ; i<count_db_fingerprint ; i++) {
ids[i] = db[mapping[i]].id;
}
int* d_ids;
cudaMalloc((void **)&d_ids, count_db_fingerprint*sizeof(int));
cudaMemcpy(d_ids, &ids[0], count_db_fingerprint*sizeof(int), cudaMemcpyHostToDevice);
auto sort_start = std::chrono::steady_clock::now();
thrust::sort_by_key(thrust::device, d_result, d_result+count_db_fingerprint, d_ids);
auto sort_end = std::chrono::steady_clock::now();
cudaMemcpy(&result[0], d_result, count_db_fingerprint*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&ids[0], d_ids, count_db_fingerprint*sizeof(int), cudaMemcpyDeviceToHost);
/*for (int i=count_db_fingerprint-1 ; i>=0 ; i--) {
std::cout << "ID " << ids[i] << "-"<< ids[i]/5 <<"\t: " << result[i];
std::cout << std::endl;
}*/
auto timer_end = std::chrono::steady_clock::now();
std::chrono::duration<double> diff = timer_end - timer_start;
std::chrono::duration<double> sort_time = sort_end - sort_start;
std::cerr << "Time to get indexing result for " << count_db << " fingerprints in DB : " << diff.count() << std::endl;
std::cerr << "Time for sorting " << sort_time.count() << std::endl;
cudaFree(d_fp);
cudaFree(d_db);
cudaFree(d_result);
cudaFree(d_mapping);
cudaFree(d_s1_result);
cudaFree(d_s2_result);
cudaFree(d_s3_result);
cudaFree(d_s4_result);
cudaFree(d_ids);
return 0;
}
// nvcc -o parallel_indexing_stream parallel_indexing_stream.cu fingerprint_structure.cpp -std=c++11 -lineinfo
|
d44b31581f4e7c2ddbd33a4f2b2c4c5f22fb154a.hip | // !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Includes, cuda */
#include <rocblas.h>
#include <hip/hip_runtime.h>
//#include "rocblas.h"
/* Number of columns & rows in dictionary */
// TODO: get as input
#define M 300 // num of Dictionary columns
#define N 50 // num of Dictionary rows
#define X 25// number of signals
/* Number of non-zero elements in signal */
int K = 4;
/* Residual error */
double epsilon = 1.0e-7;
/* Max num of iterations - assume as same as num of elements in signal */
int T = N;
/* Sign function */
double sign(double x){return (x>=0) - (x<0);}
/* Matrix indexing convention */
#define id(m, n, ld) (((n) * (ld) + (m)))
int main(int argc, char** argv)
{
cublasStatus status;
double *h_D, *h_X, *h_C, *c; //host memory pointers
double *d_D = 0, *d_S = 0, *d_R = 0; //device memory pointers
int i;
int MX = M*X;
	int NX = N*X; // total entries in the projected signals (N rows per signal)
int MN = M*N, m, n, k, q, t;
double norm = sqrt(N), normi, normf, a, dtime;
printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X);
/* Initialize srand and clock */
srand(time(NULL));
clock_t start = clock();
/* Initialize cublas */
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Initialize dictionary on host */
h_D = (double*)malloc(MN * sizeof(h_D[0]));
if(h_D == 0){
fprintf(stderr, " host memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
for(n = 0; n < N; n++){
for(m = 0; m < M; m++){
a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm;
h_D[id(m, n, M)] = a;
}
}
/* Create X random K-sparse signals */
h_X = (double*)calloc(M*X, sizeof(h_X[0])); // X initialized with zeros
if(h_X == 0){
fprintf(stderr, " host memory allocation error (signal)\n");
return EXIT_FAILURE;
}
for (i = 0;i < X;i++){
for(k = 0; k < K; k++){
a = 2.0*rand()/(double)RAND_MAX - 1.0;
h_X[(rand()%M)+i*M] = a;}
}
/* Allocate solution memory on host */
h_C = (double*)calloc(M*X, sizeof(h_C[0]));
if(h_C == 0){
fprintf(stderr, " host memory allocation error (solution)\n");
return EXIT_FAILURE;
}
	c = (double*)calloc(1, sizeof(*c));
if(c == 0){
fprintf(stderr, " host memory allocation error (c)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: dictionary */
status = hipblasAlloc(MN, sizeof(d_D[0]),(void**)&d_D);
if(status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, " device memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
	//transfer the Host dictionary to Device dictionary
status = hipblasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1);
if(status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! device access error (write dictionary)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: signal */
status = hipblasAlloc(MX, sizeof(d_R[0]),(void**)&d_R);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device memory allocation error (signal)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device access error (write signal)\n");
return EXIT_FAILURE;
}
/*Allocate device memory for Signal Solution */
status = hipblasAlloc(NX, sizeof(d_S[0]),(void**)&d_S);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device memory allocation error (projected vector)\n");
return EXIT_FAILURE;
}
/* Encoding the signal on device*/
for (i = 0;i<X;i++) {
hipblasDgemv('t', M, N, 1.0, d_D, M,d_R+i*M, 1, 0.0, d_S+i*N, 1);
status = hipblasGetError();
if(status != HIPBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! kernel execution error (encoding)\n");
return EXIT_FAILURE;
}
}
//dtime = ((double)clock()-start)/CLOCKS_PER_SEC; // TODO : need to remove
//printf("\nTime for encoding: %f(s)",dtime);
/* Decoding the signal on device*/
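	/* Each pass of the loop below is one matching-pursuit iteration: project the current
	   residual onto every dictionary atom (Dgemv), pick the atom with the largest correlation
	   (Idamax), accumulate its coefficient into the host solution h_C, and subtract that
	   atom's contribution from the residual (Daxpy), until the residual norm drops below
	   epsilon or T iterations have been taken. */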
start = clock();
for (i = 0;i<X;i++) {
normi = hipblasDnrm2 (N, d_S+i*N, 1);
epsilon = sqrt(epsilon*normi);
normf = normi;
t = 0;
while(normf > epsilon && t < T){
//printf("\n %f",normf);
hipblasDgemv('n', M, N, 1.0, d_D, M,d_S+i*N, 1, 0.0, d_R+i*M, 1);
q = hipblasIdamax (M, d_R+i*M, 1) - 1;
hipblasGetVector(1, sizeof(c),&d_R[q+i*M], 1, c, 1);
h_C[q+i*M] = *c + h_C[q+i*M];
hipblasDaxpy (N, -(*c), &d_D[q], M, d_S+i*N, 1);
normf = hipblasDnrm2 (N, d_S+i*N, 1);
t++;
}
/*
status = hipblasGetError();
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "! kernel execution error (decoding)\n");
return EXIT_FAILURE;
*/
a = 100.0*(normf*normf)/(normi*normi);
// printf("\nComputation residual error: %f",a);
a=0; q=0; *c=0;
epsilon=1.0e-7;
}
dtime = (((double)clock()-start)*1000)/CLOCKS_PER_SEC;
printf("\n Total time : %f(ms) ",dtime);
/* Check the solution */
/*
printf("\nSolution (first column),Reference (second column):");
getchar(); // Wait for key ...
for(m=0; m<M; m++)
{
printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]);
}
normi = 0; normf = 0;
for(m=0; m<M; m++)
{
normi = normi + h_X[m]*h_X[m];
normf = normf +
(h_C[m] - h_X[m])*(h_C[m] - h_X[m]);
}
printf("\nSolution residual error:%f", 100.0*normf/normi);
*/
/* Memory clean up */
free(h_D); free(h_X); free(h_C);
status = hipblasFree(d_D); status = hipblasFree(d_S); status = hipblasFree(d_R);
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasShutdown();
if(status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr,"! cublas shutdown error\n");
return EXIT_FAILURE;
}
if(argc<=1 || strcmp(argv[1],"-noprompt")){
printf("\nPress ENTER to exit...\n");
getchar();
}
return EXIT_SUCCESS;
}
| d44b31581f4e7c2ddbd33a4f2b2c4c5f22fb154a.cu | /* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
/* Includes, cuda */
#include <cublas.h>
#include <cuda_runtime.h>
//#include "cublas_v2.h"
/* Number of columns & rows in dictionary */
// TODO: get as input
#define M 300 // num of Dictionary columns
#define N 50 // num of Dictionary rows
#define X 25// number of signals
/* Number of non-zero elements in signal */
int K = 4;
/* Residual error */
double epsilon = 1.0e-7;
/* Max num of iterations - assume as same as num of elements in signal */
int T = N;
/* Sign function */
double sign(double x){return (x>=0) - (x<0);}
/* Matrix indexing convention */
#define id(m, n, ld) (((n) * (ld) + (m)))
int main(int argc, char** argv)
{
cublasStatus status;
double *h_D, *h_X, *h_C, *c; //host memory pointers
double *d_D = 0, *d_S = 0, *d_R = 0; //device memory pointers
int i;
int MX = M*X;
	int NX = N*X; // total entries in the projected signals (N rows per signal)
int MN = M*N, m, n, k, q, t;
double norm = sqrt(N), normi, normf, a, dtime;
printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X);
/* Initialize srand and clock */
srand(time(NULL));
clock_t start = clock();
/* Initialize cublas */
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr,"CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Initialize dictionary on host */
h_D = (double*)malloc(MN * sizeof(h_D[0]));
if(h_D == 0){
fprintf(stderr, " host memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
for(n = 0; n < N; n++){
for(m = 0; m < M; m++){
a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm;
h_D[id(m, n, M)] = a;
}
}
/* Create X random K-sparse signals */
h_X = (double*)calloc(M*X, sizeof(h_X[0])); // X initialized with zeros
if(h_X == 0){
fprintf(stderr, " host memory allocation error (signal)\n");
return EXIT_FAILURE;
}
for (i = 0;i < X;i++){
for(k = 0; k < K; k++){
a = 2.0*rand()/(double)RAND_MAX - 1.0;
h_X[(rand()%M)+i*M] = a;}
}
/* Allocate solution memory on host */
h_C = (double*)calloc(M*X, sizeof(h_C[0]));
if(h_C == 0){
fprintf(stderr, " host memory allocation error (solution)\n");
return EXIT_FAILURE;
}
	c = (double*)calloc(1, sizeof(*c));
if(c == 0){
fprintf(stderr, " host memory allocation error (c)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: dictionary */
status = cublasAlloc(MN, sizeof(d_D[0]),(void**)&d_D);
if(status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, " device memory allocation error (dictionary)\n");
return EXIT_FAILURE;
}
	//transfer the Host dictionary to Device dictionary
status = cublasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1);
if(status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! device access error (write dictionary)\n");
return EXIT_FAILURE;
}
/* Host to device data transfer: signal */
status = cublasAlloc(MX, sizeof(d_R[0]),(void**)&d_R);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device memory allocation error (signal)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device access error (write signal)\n");
return EXIT_FAILURE;
}
/*Allocate device memory for Signal Solution */
status = cublasAlloc(NX, sizeof(d_S[0]),(void**)&d_S);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! device memory allocation error (projected vector)\n");
return EXIT_FAILURE;
}
/* Encoding the signal on device*/
for (i = 0;i<X;i++) {
cublasDgemv('t', M, N, 1.0, d_D, M,d_R+i*M, 1, 0.0, d_S+i*N, 1);
status = cublasGetError();
if(status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "! kernel execution error (encoding)\n");
return EXIT_FAILURE;
}
}
//dtime = ((double)clock()-start)/CLOCKS_PER_SEC; // TODO : need to remove
//printf("\nTime for encoding: %f(s)",dtime);
/* Decoding the signal on device*/
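	/* Each pass of the loop below is one matching-pursuit iteration: project the current
	   residual onto every dictionary atom (Dgemv), pick the atom with the largest correlation
	   (Idamax), accumulate its coefficient into the host solution h_C, and subtract that
	   atom's contribution from the residual (Daxpy), until the residual norm drops below
	   epsilon or T iterations have been taken. */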
start = clock();
for (i = 0;i<X;i++) {
normi = cublasDnrm2 (N, d_S+i*N, 1);
epsilon = sqrt(epsilon*normi);
normf = normi;
t = 0;
while(normf > epsilon && t < T){
//printf("\n %f",normf);
cublasDgemv('n', M, N, 1.0, d_D, M,d_S+i*N, 1, 0.0, d_R+i*M, 1);
q = cublasIdamax (M, d_R+i*M, 1) - 1;
cublasGetVector(1, sizeof(c),&d_R[q+i*M], 1, c, 1);
h_C[q+i*M] = *c + h_C[q+i*M];
cublasDaxpy (N, -(*c), &d_D[q], M, d_S+i*N, 1);
normf = cublasDnrm2 (N, d_S+i*N, 1);
t++;
}
/*
status = cublasGetError();
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "! kernel execution error (decoding)\n");
return EXIT_FAILURE;
*/
a = 100.0*(normf*normf)/(normi*normi);
// printf("\nComputation residual error: %f",a);
a=0; q=0; *c=0;
epsilon=1.0e-7;
}
dtime = (((double)clock()-start)*1000)/CLOCKS_PER_SEC;
printf("\n Total time : %f(ms) ",dtime);
/* Check the solution */
/*
printf("\nSolution (first column),Reference (second column):");
getchar(); // Wait for key ...
for(m=0; m<M; m++)
{
printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]);
}
normi = 0; normf = 0;
for(m=0; m<M; m++)
{
normi = normi + h_X[m]*h_X[m];
normf = normf +
(h_C[m] - h_X[m])*(h_C[m] - h_X[m]);
}
printf("\nSolution residual error:%f", 100.0*normf/normi);
*/
/* Memory clean up */
free(h_D); free(h_X); free(h_C);
status = cublasFree(d_D); status = cublasFree(d_S); status = cublasFree(d_R);
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr,"! device memory free error\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasShutdown();
if(status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr,"! cublas shutdown error\n");
return EXIT_FAILURE;
}
if(argc<=1 || strcmp(argv[1],"-noprompt")){
printf("\nPress ENTER to exit...\n");
getchar();
}
return EXIT_SUCCESS;
}
|
55ee581885265d1f627457230df0eef5313ae4b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void rdiv_double(int n, double *a, double *b, double *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
sum[i] = b[i] / a[i];
} | 55ee581885265d1f627457230df0eef5313ae4b1.cu | extern "C"
__global__ void rdiv_double(int n, double *a, double *b, double *sum)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
sum[i] = b[i] / a[i];
} |
1133125c56996c8e1ff5bb654ba24e0a3f57f708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <global.h>
#include "AdvectMode.h"
#include "BrickIndexGPU.h"
#include "BrickRequestsGPU.h"
#include "IntegrationParamsGPU.h"
#include "LineInfoGPU.h"
#include "TextureFilterMode.h"
#include "TracingCommon.h"
#include "VolumeInfoGPU.h"
#include "AdvectDense.cuh"
#include "Coords.cuh"
#include "IntegratorCommon.cuh"
#include "TextureFilter.cuh"
#include "Jacobian_hip.cuh"
//extern __constant__ VolumeInfoGPU c_volumeInfo;
//extern __constant__ BrickIndexGPU c_brickIndex;
//extern __constant__ BrickRequestsGPU c_brickRequests;
extern __constant__ IntegrationParamsGPU c_integrationParams;
//extern __constant__ LineInfoGPU c_lineInfo;
extern texture<float4, hipTextureType3D, hipReadModeElementType> g_texVolume1;
template<eAdvectMode advectMode, eTextureFilterMode filterMode>
__global__ void integrateStreamLinesDenseKernel(LineInfoGPU c_lineInfo, VolumeInfoGPU c_volumeInfo, BrickIndexGPU c_brickIndex, BrickRequestsGPU c_brickRequests)
{
uint lineIndex = blockIdx.x * blockDim.x + threadIdx.x;
if(lineIndex >= c_lineInfo.lineCount)
return;
uint lineLength = c_lineInfo.pVertexCounts[lineIndex];
if(lineLength >= c_lineInfo.lineLengthMax)
return;
LineVertex vertex;
// get initial position from checkpoints array
vertex.Position = c_lineInfo.pCheckpoints[lineIndex].Position;
vertex.Time = c_lineInfo.pCheckpoints[lineIndex].Time;
vertex.SeedPosition = c_lineInfo.pCheckpoints[lineIndex].SeedPosition;
if(vertex.Time >= c_integrationParams.timeMax || c_volumeInfo.isOutsideOfDomain(vertex.Position))
return;
// find brick we're in
float3 brickBoxMin;
float3 brickBoxMax;
float3 world2texOffset;
float3 world2texScale;
if (!findBrick(c_volumeInfo, c_brickIndex, c_brickRequests, vertex.Position, brickBoxMin, brickBoxMax, world2texOffset, world2texScale)) {
return;
}
// get velocity at initial position
float4 vel4 = sampleVolume<filterMode, float4, float4>(g_texVolume1, w2t(vertex.Position));
vertex.Velocity = c_volumeInfo.velocityScale * make_float3(vel4.x, vel4.y, vel4.z);
vertex.LineID = lineIndex;
// this always points to the next vertex to be written out
LineVertex* pVertices = c_lineInfo.pVertices + lineIndex * c_lineInfo.vertexStride + lineLength;
if(lineLength == 0) {
// new line - build normal: arbitrary vector perpendicular to velocity
float3 tangent = normalize(vertex.Velocity);
vertex.Normal = cross(tangent, make_float3(1.0f, 0.0f, 0.0f));
if(length(vertex.Normal) < 0.01f) vertex.Normal = cross(tangent, make_float3(0.0f, 1.0f, 0.0f));
vertex.Normal = normalize(vertex.Normal);
vertex.Jacobian = getJacobian<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
float3 gradT = sampleScalarGradient<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vertex.Heat = vel4.w;
vertex.HeatCurrent = gradT;
// write out initial vertex
*pVertices++ = vertex;
++lineLength;
} else {
// existing line - get old normal
vertex.Normal = c_lineInfo.pCheckpoints[lineIndex].Normal;
}
// get the last vertex that was written out
float3 lastOutPos = (pVertices - 1)->Position;
float lastOutTime = (pVertices - 1)->Time;
float deltaTime = c_lineInfo.pCheckpoints[lineIndex].DeltaT;
uint step = 0;
uint stepsAccepted = 0;
// dense output
const uint coeffCount = advectDenseInfo<advectMode>::OutputCoeffCount;
float3 outputCoeffs[coeffCount];
bool stayedInAvailableBrick = true;
while(step < c_integrationParams.stepCountMax &&
vertex.Time < c_integrationParams.timeMax &&
lineLength < c_lineInfo.lineLengthMax)
{
float deltaTimeBak = deltaTime;
// limit deltaTime ..
// .. so we don't integrate past timeMax
deltaTime = min(deltaTime, c_integrationParams.timeMax - vertex.Time);
// .. so we don't leave the current brick's safe region
float distMax = c_integrationParams.brickSafeMarginWorld + distanceToBrickBorder(vertex.Position, brickBoxMin, brickBoxMax);
deltaTime = min(deltaTime, distMax / c_integrationParams.velocityMaxWorld);
// integrate
float deltaTimeThisStep = deltaTime;
bool stepAccepted = advectDense<advectMode, filterMode>(
g_texVolume1,
vertex.Position, vertex.Time, vertex.Velocity,
deltaTime,
outputCoeffs,
world2texOffset, world2texScale,
c_volumeInfo.velocityScale);
++step;
if(stepAccepted) {
++stepsAccepted;
// if we artificially limited deltaTime earlier, reset it now
// (if we didn't, the new deltaTime is larger than the backup anyway)
deltaTime = fmax(deltaTime, deltaTimeBak);
float3 posDiff = vertex.Position - lastOutPos;
float timeDiff = vertex.Time - lastOutTime;
float posDiffSqr = dot(posDiff, posDiff);
if((posDiffSqr >= c_integrationParams.outputPosDiffSquared) || (timeDiff >= c_integrationParams.outputTimeDiff)) {
//get jacobian and heat for measures
vertex.Jacobian = getJacobian<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vel4 = sampleVolume<filterMode, float4, float4>(g_texVolume1, w2t(vertex.Position));
float3 gradT = sampleScalarGradient<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vertex.Heat = vel4.w;
vertex.HeatCurrent = gradT;
// write out interpolated positions
uint intervalCount = max(1, uint(sqrt(posDiffSqr / c_integrationParams.outputPosDiffSquared)));
intervalCount = min(intervalCount, c_lineInfo.lineLengthMax - lineLength);
// interval == 0 corresponds to the old position, interval == intervalCount to the new one
LineVertex tmpVertex = vertex;
for(uint interval = 1; interval < intervalCount; ++interval) {
float3 tmp[coeffCount];
// position:
// copy coefficients
for(uint i = 0; i < coeffCount; ++i) {
tmp[i] = outputCoeffs[i];
}
// evaluate bezier segment using de Casteljau's scheme
float t = float(interval) / float(intervalCount);
for(uint l = 1; l < coeffCount; ++l) {
for(uint i = coeffCount - 1; i >= l; --i) {
tmp[i] = (1.0f - t) * tmp[i - 1] + t * tmp[i];
}
}
tmpVertex.Position = tmp[coeffCount - 1];
tmpVertex.Time = vertex.Time - (1.0f - t) * deltaTimeThisStep;
// velocity:
for(uint i = 0; i < coeffCount - 1; ++i) {
tmp[i] = outputCoeffs[i+1] - outputCoeffs[i];
}
for(uint l = 1; l < coeffCount - 1; ++l) {
for(uint i = coeffCount - 2; i >= l; --i) {
tmp[i] = (1.0f - t) * tmp[i - 1] + t * tmp[i];
}
}
tmpVertex.Velocity = float(coeffCount - 1) * tmp[coeffCount - 2] / deltaTimeThisStep;
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(tmpVertex.Velocity, tmpVertex.Normal);
tmpVertex.Normal = normalize(cross(binormal, tmpVertex.Velocity));
// and write out the interpolated vertex
*pVertices++ = tmpVertex;
++lineLength;
}
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(vertex.Velocity, tmpVertex.Normal);
vertex.Normal = normalize(cross(binormal, vertex.Velocity));
// write out final step position
*pVertices++ = vertex;
++lineLength;
lastOutPos = vertex.Position;
lastOutTime = vertex.Time;
} else {
// even if we don't output anything, we still need to
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(vertex.Velocity, vertex.Normal);
vertex.Normal = normalize(cross(binormal, vertex.Velocity));
}
// check if we left the current brick
if(!isInBrick(vertex.Position, brickBoxMin, brickBoxMax)) {
bool isOutOfDomain = c_volumeInfo.isOutsideOfDomain(vertex.Position);
if(isOutOfDomain) {
// write out final position
*pVertices++ = vertex;
++lineLength;
lastOutPos = vertex.Position;
lastOutTime = vertex.Time;
}
if (isOutOfDomain || !findBrick(c_volumeInfo, c_brickIndex, c_brickRequests, vertex.Position, brickBoxMin, brickBoxMax, world2texOffset, world2texScale)) {
// new brick isn't available (or we went out of the domain) - get outta here
// (if we're still inside the domain, the new brick has already been requested in findBrick!)
stayedInAvailableBrick = false;
break;
} else {
// semi-HACK: update velocity from new brick (can be different to previous one because of lossy compression)
// this avoids excessively small time steps at some brick boundaries
vertex.Velocity = c_volumeInfo.velocityScale * sampleVolume<filterMode, float4, float3>(g_texVolume1, w2t(vertex.Position));
}
}
}
}
c_lineInfo.pVertexCounts[lineIndex] = lineLength;
//assert(c_lineInfo.pVertexCounts[lineIndex] < lineLengthMax);
// update checkpoint for next integration round
c_lineInfo.pCheckpoints[lineIndex].Position = vertex.Position;
c_lineInfo.pCheckpoints[lineIndex].Time = vertex.Time;
c_lineInfo.pCheckpoints[lineIndex].Normal = vertex.Normal;
c_lineInfo.pCheckpoints[lineIndex].DeltaT = deltaTime;
c_lineInfo.pCheckpoints[lineIndex].StepsAccepted += stepsAccepted;
c_lineInfo.pCheckpoints[lineIndex].StepsTotal += step;
// if the line is still alive and in an available brick, request it again for next round
if(vertex.Time < c_integrationParams.timeMax &&
lineLength < c_lineInfo.lineLengthMax &&
stayedInAvailableBrick)
{
// find out which brick we're in now
uint3 brickIndex = c_volumeInfo.getBrickIndex(vertex.Position);
uint brickLinearIndex = c_volumeInfo.getBrickLinearIndex(brickIndex);
// request it to be loaded
c_brickRequests.requestBrick(brickLinearIndex);
}
}
#include "cudaUtil.h"
#include "IntegratorKernelDefines.h"
void integratorKernelStreamLinesDense(LineInfoGPU lineInfo, VolumeInfoGPU volumeInfo, BrickIndexGPU brickIndex, BrickRequestsGPU brickRequests, eAdvectMode advectMode, eTextureFilterMode filterMode)
{
uint blockSize = 128; //TODO try out different values
uint blockCount = (lineInfo.lineCount + blockSize - 1) / blockSize;
#define INTEGRATE(advect, filter) hipLaunchKernelGGL((integrateStreamLinesDenseKernel<advect, filter>), dim3(blockCount), dim3(blockSize), 0, 0, lineInfo, volumeInfo, brickIndex, brickRequests)
ADVECT_DENSE_SWITCH;
cudaCheckMsg("integrateStreamLinesDenseKernel execution failed");
#undef INTEGRATE
}
| 1133125c56996c8e1ff5bb654ba24e0a3f57f708.cu | #include <global.h>
#include "AdvectMode.h"
#include "BrickIndexGPU.h"
#include "BrickRequestsGPU.h"
#include "IntegrationParamsGPU.h"
#include "LineInfoGPU.h"
#include "TextureFilterMode.h"
#include "TracingCommon.h"
#include "VolumeInfoGPU.h"
#include "AdvectDense.cuh"
#include "Coords.cuh"
#include "IntegratorCommon.cuh"
#include "TextureFilter.cuh"
#include "Jacobian.cuh"
//extern __constant__ VolumeInfoGPU c_volumeInfo;
//extern __constant__ BrickIndexGPU c_brickIndex;
//extern __constant__ BrickRequestsGPU c_brickRequests;
extern __constant__ IntegrationParamsGPU c_integrationParams;
//extern __constant__ LineInfoGPU c_lineInfo;
extern texture<float4, cudaTextureType3D, cudaReadModeElementType> g_texVolume1;
template<eAdvectMode advectMode, eTextureFilterMode filterMode>
__global__ void integrateStreamLinesDenseKernel(LineInfoGPU c_lineInfo, VolumeInfoGPU c_volumeInfo, BrickIndexGPU c_brickIndex, BrickRequestsGPU c_brickRequests)
{
uint lineIndex = blockIdx.x * blockDim.x + threadIdx.x;
if(lineIndex >= c_lineInfo.lineCount)
return;
uint lineLength = c_lineInfo.pVertexCounts[lineIndex];
if(lineLength >= c_lineInfo.lineLengthMax)
return;
LineVertex vertex;
// get initial position from checkpoints array
vertex.Position = c_lineInfo.pCheckpoints[lineIndex].Position;
vertex.Time = c_lineInfo.pCheckpoints[lineIndex].Time;
vertex.SeedPosition = c_lineInfo.pCheckpoints[lineIndex].SeedPosition;
if(vertex.Time >= c_integrationParams.timeMax || c_volumeInfo.isOutsideOfDomain(vertex.Position))
return;
// find brick we're in
float3 brickBoxMin;
float3 brickBoxMax;
float3 world2texOffset;
float3 world2texScale;
if (!findBrick(c_volumeInfo, c_brickIndex, c_brickRequests, vertex.Position, brickBoxMin, brickBoxMax, world2texOffset, world2texScale)) {
return;
}
// get velocity at initial position
float4 vel4 = sampleVolume<filterMode, float4, float4>(g_texVolume1, w2t(vertex.Position));
vertex.Velocity = c_volumeInfo.velocityScale * make_float3(vel4.x, vel4.y, vel4.z);
vertex.LineID = lineIndex;
// this always points to the next vertex to be written out
LineVertex* pVertices = c_lineInfo.pVertices + lineIndex * c_lineInfo.vertexStride + lineLength;
if(lineLength == 0) {
// new line - build normal: arbitrary vector perpendicular to velocity
float3 tangent = normalize(vertex.Velocity);
vertex.Normal = cross(tangent, make_float3(1.0f, 0.0f, 0.0f));
if(length(vertex.Normal) < 0.01f) vertex.Normal = cross(tangent, make_float3(0.0f, 1.0f, 0.0f));
vertex.Normal = normalize(vertex.Normal);
vertex.Jacobian = getJacobian<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
float3 gradT = sampleScalarGradient<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vertex.Heat = vel4.w;
vertex.HeatCurrent = gradT;
// write out initial vertex
*pVertices++ = vertex;
++lineLength;
} else {
// existing line - get old normal
vertex.Normal = c_lineInfo.pCheckpoints[lineIndex].Normal;
}
// get the last vertex that was written out
float3 lastOutPos = (pVertices - 1)->Position;
float lastOutTime = (pVertices - 1)->Time;
float deltaTime = c_lineInfo.pCheckpoints[lineIndex].DeltaT;
uint step = 0;
uint stepsAccepted = 0;
// dense output
const uint coeffCount = advectDenseInfo<advectMode>::OutputCoeffCount;
float3 outputCoeffs[coeffCount];
bool stayedInAvailableBrick = true;
while(step < c_integrationParams.stepCountMax &&
vertex.Time < c_integrationParams.timeMax &&
lineLength < c_lineInfo.lineLengthMax)
{
float deltaTimeBak = deltaTime;
// limit deltaTime ..
// .. so we don't integrate past timeMax
deltaTime = min(deltaTime, c_integrationParams.timeMax - vertex.Time);
// .. so we don't leave the current brick's safe region
float distMax = c_integrationParams.brickSafeMarginWorld + distanceToBrickBorder(vertex.Position, brickBoxMin, brickBoxMax);
deltaTime = min(deltaTime, distMax / c_integrationParams.velocityMaxWorld);
// integrate
float deltaTimeThisStep = deltaTime;
bool stepAccepted = advectDense<advectMode, filterMode>(
g_texVolume1,
vertex.Position, vertex.Time, vertex.Velocity,
deltaTime,
outputCoeffs,
world2texOffset, world2texScale,
c_volumeInfo.velocityScale);
++step;
if(stepAccepted) {
++stepsAccepted;
// if we artificially limited deltaTime earlier, reset it now
// (if we didn't, the new deltaTime is larger than the backup anyway)
deltaTime = fmax(deltaTime, deltaTimeBak);
float3 posDiff = vertex.Position - lastOutPos;
float timeDiff = vertex.Time - lastOutTime;
float posDiffSqr = dot(posDiff, posDiff);
if((posDiffSqr >= c_integrationParams.outputPosDiffSquared) || (timeDiff >= c_integrationParams.outputTimeDiff)) {
//get jacobian and heat for measures
vertex.Jacobian = getJacobian<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vel4 = sampleVolume<filterMode, float4, float4>(g_texVolume1, w2t(vertex.Position));
float3 gradT = sampleScalarGradient<filterMode>(g_texVolume1, w2t(vertex.Position), c_integrationParams.gridSpacing);
vertex.Heat = vel4.w;
vertex.HeatCurrent = gradT;
// write out interpolated positions
uint intervalCount = max(1, uint(sqrt(posDiffSqr / c_integrationParams.outputPosDiffSquared)));
intervalCount = min(intervalCount, c_lineInfo.lineLengthMax - lineLength);
// interval == 0 corresponds to the old position, interval == intervalCount to the new one
LineVertex tmpVertex = vertex;
for(uint interval = 1; interval < intervalCount; ++interval) {
float3 tmp[coeffCount];
// position:
// copy coefficients
for(uint i = 0; i < coeffCount; ++i) {
tmp[i] = outputCoeffs[i];
}
// evaluate bezier segment using de Casteljau's scheme
float t = float(interval) / float(intervalCount);
for(uint l = 1; l < coeffCount; ++l) {
for(uint i = coeffCount - 1; i >= l; --i) {
tmp[i] = (1.0f - t) * tmp[i - 1] + t * tmp[i];
}
}
tmpVertex.Position = tmp[coeffCount - 1];
tmpVertex.Time = vertex.Time - (1.0f - t) * deltaTimeThisStep;
// velocity:
for(uint i = 0; i < coeffCount - 1; ++i) {
tmp[i] = outputCoeffs[i+1] - outputCoeffs[i];
}
for(uint l = 1; l < coeffCount - 1; ++l) {
for(uint i = coeffCount - 2; i >= l; --i) {
tmp[i] = (1.0f - t) * tmp[i - 1] + t * tmp[i];
}
}
tmpVertex.Velocity = float(coeffCount - 1) * tmp[coeffCount - 2] / deltaTimeThisStep;
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(tmpVertex.Velocity, tmpVertex.Normal);
tmpVertex.Normal = normalize(cross(binormal, tmpVertex.Velocity));
// and write out the interpolated vertex
*pVertices++ = tmpVertex;
++lineLength;
}
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(vertex.Velocity, tmpVertex.Normal);
vertex.Normal = normalize(cross(binormal, vertex.Velocity));
// write out final step position
*pVertices++ = vertex;
++lineLength;
lastOutPos = vertex.Position;
lastOutTime = vertex.Time;
} else {
// even if we don't output anything, we still need to
// re-orthogonalize normal wrt. tangent == velocity direction
float3 binormal = cross(vertex.Velocity, vertex.Normal);
vertex.Normal = normalize(cross(binormal, vertex.Velocity));
}
// check if we left the current brick
if(!isInBrick(vertex.Position, brickBoxMin, brickBoxMax)) {
bool isOutOfDomain = c_volumeInfo.isOutsideOfDomain(vertex.Position);
if(isOutOfDomain) {
// write out final position
*pVertices++ = vertex;
++lineLength;
lastOutPos = vertex.Position;
lastOutTime = vertex.Time;
}
if (isOutOfDomain || !findBrick(c_volumeInfo, c_brickIndex, c_brickRequests, vertex.Position, brickBoxMin, brickBoxMax, world2texOffset, world2texScale)) {
// new brick isn't available (or we went out of the domain) - get outta here
// (if we're still inside the domain, the new brick has already been requested in findBrick!)
stayedInAvailableBrick = false;
break;
} else {
// semi-HACK: update velocity from new brick (can be different to previous one because of lossy compression)
// this avoids excessively small time steps at some brick boundaries
vertex.Velocity = c_volumeInfo.velocityScale * sampleVolume<filterMode, float4, float3>(g_texVolume1, w2t(vertex.Position));
}
}
}
}
c_lineInfo.pVertexCounts[lineIndex] = lineLength;
//assert(c_lineInfo.pVertexCounts[lineIndex] < lineLengthMax);
// update checkpoint for next integration round
c_lineInfo.pCheckpoints[lineIndex].Position = vertex.Position;
c_lineInfo.pCheckpoints[lineIndex].Time = vertex.Time;
c_lineInfo.pCheckpoints[lineIndex].Normal = vertex.Normal;
c_lineInfo.pCheckpoints[lineIndex].DeltaT = deltaTime;
c_lineInfo.pCheckpoints[lineIndex].StepsAccepted += stepsAccepted;
c_lineInfo.pCheckpoints[lineIndex].StepsTotal += step;
// if the line is still alive and in an available brick, request it again for next round
if(vertex.Time < c_integrationParams.timeMax &&
lineLength < c_lineInfo.lineLengthMax &&
stayedInAvailableBrick)
{
// find out which brick we're in now
uint3 brickIndex = c_volumeInfo.getBrickIndex(vertex.Position);
uint brickLinearIndex = c_volumeInfo.getBrickLinearIndex(brickIndex);
// request it to be loaded
c_brickRequests.requestBrick(brickLinearIndex);
}
}
#include "cudaUtil.h"
#include "IntegratorKernelDefines.h"
void integratorKernelStreamLinesDense(LineInfoGPU lineInfo, VolumeInfoGPU volumeInfo, BrickIndexGPU brickIndex, BrickRequestsGPU brickRequests, eAdvectMode advectMode, eTextureFilterMode filterMode)
{
uint blockSize = 128; //TODO try out different values
uint blockCount = (lineInfo.lineCount + blockSize - 1) / blockSize;
#define INTEGRATE(advect, filter) integrateStreamLinesDenseKernel <advect, filter> <<<blockCount, blockSize>>> (lineInfo, volumeInfo, brickIndex, brickRequests)
ADVECT_DENSE_SWITCH;
cudaCheckMsg("integrateStreamLinesDenseKernel execution failed");
#undef INTEGRATE
}
|
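// --- Editor's hedged illustration (not part of the original file pair above) ---
// The dense-output interpolation in integrateStreamLinesDenseKernel evaluates a
// Bezier segment with de Casteljau's scheme; this standalone host version shows
// the same recurrence on scalars. The function name and the fixed upper bound on
// coeffCount are assumptions.
static float de_casteljau_example(const float *coeffs, int coeffCount, float t)
{
    float tmp[8];                              // assumes coeffCount <= 8
    for (int i = 0; i < coeffCount; ++i)
        tmp[i] = coeffs[i];                    // copy the control points
    for (int l = 1; l < coeffCount; ++l)       // each pass blends neighbouring points
        for (int i = coeffCount - 1; i >= l; --i)
            tmp[i] = (1.0f - t) * tmp[i - 1] + t * tmp[i];
    return tmp[coeffCount - 1];                // curve value at parameter t in [0, 1]
}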
0bf48e3f079ceb539ebe1cc46c68e06982370d82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "prroi_pooling_gpu_impl.cuh"
#include <cstdio>
#include <cfloat>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define CUDA_POST_KERNEL_CHECK \
do { \
hipError_t err = hipGetLastError(); \
if (hipSuccess != err) { \
fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); \
exit(-1); \
} \
} while(0)
#define CUDA_NUM_THREADS 512
namespace {
static int CUDA_NUM_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
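// Worked example for the launch arithmetic above (editor addition; the numbers
// are illustrative): with top_count = 100000 and CUDA_NUM_THREADS = 512,
// CUDA_NUM_BLOCKS returns (100000 + 511) / 512 = 196 blocks, i.e. 100352 threads.
// CUDA_KERNEL_LOOP then strides each thread by blockDim.x * gridDim.x, so the
// kernels below stay correct even if fewer threads than elements are launched.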
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)
{
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
float retVal = overflow ? 0.0f : data[h * width + w];
return retVal;
}
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){
dw = dw > 0 ? dw : -dw;
dh = dh > 0 ? dh : -dh;
return (1.0f - dh) * (1.0f - dw);
}
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){
float retVal = 0.0f;
int h1 = floorf(h);
int w1 = floorf(w);
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h)+1;
w1 = floorf(w);
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h);
w1 = floorf(w)+1;
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h)+1;
w1 = floorf(w)+1;
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
return retVal;
}
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,
const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)
{
float alpha, beta, lim_alpha, lim_beta, tmp;
float sum_out = 0;
alpha = x0 - float(s_w);
beta = y0 - float(s_h);
lim_alpha = x1 - float(s_w);
lim_beta = y1 - float(s_h);
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;
alpha = x0 - float(s_w);
beta = float(e_h) - y1;
lim_alpha = x1 - float(s_w);
lim_beta = float(e_h) - y0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;
return sum_out;
}
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)
{
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
if (!overflow)
atomicAdd(diff + h * width + w, top_diff * coeff);
}
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,
const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)
{
float alpha, beta, lim_alpha, lim_beta, tmp;
alpha = x0 - float(s_w);
beta = y0 - float(s_h);
lim_alpha = x1 - float(s_w);
lim_beta = y1 - float(s_h);
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);
alpha = x0 - float(s_w);
beta = float(e_h) - y1;
lim_alpha = x1 - float(s_w);
lim_beta = float(e_h) - y0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}
__global__ void PrRoIPoolingForward(
const int nthreads,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_OUT top_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
float *this_out = top_data + index;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
if (win_size == 0) {
*this_out = 0;
return;
}
float sum_out = 0;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
for (int w_iter = s_w; w_iter < e_w; ++w_iter)
for (int h_iter = s_h; h_iter < e_h; ++h_iter)
sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
height, width);
*this_out = sum_out / win_size;
}
}
__global__ void PrRoIPoolingBackward(
const int nthreads,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, (float)0);
float roi_height = max(roi_end_h - roi_start_h, (float)0);
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_out_grad = top_diff + index;
float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
for (int w_iter = s_w; w_iter < e_w; ++w_iter)
for (int h_iter = s_h; h_iter < e_h; ++h_iter)
PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
height, width);
}
}
__global__ void PrRoIPoolingCoorBackward(
const int nthreads,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, (float)0);
float roi_height = max(roi_end_h - roi_start_h, (float)0);
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_out_grad = top_diff + index;
const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
const float *this_top_data = top_data + index;
float *this_data_grad = bottom_diff + n * 5;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;
// WARNING: to be discussed
if (sum_out == 0)
return;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
min(win_end_h, float(h_iter + 1)) - h_iter,
PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));
g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
min(win_end_h, float(h_iter + 1)) - h_iter,
PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
}
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
min(win_end_w, float(w_iter + 1)) - w_iter,
PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));
g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
min(win_end_w, float(w_iter + 1)) - w_iter,
PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
}
float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);
partial_x1 = partial_x1 / win_size * spatial_scale;
partial_x2 = partial_x2 / win_size * spatial_scale;
partial_y1 = partial_y1 / win_size * spatial_scale;
partial_y2 = partial_y2 / win_size * spatial_scale;
// (b, x1, y1, x2, y2)
this_data_grad[0] = 0;
atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))
* (*this_out_grad));
atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))
* (*this_out_grad));
atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)
* (*this_out_grad));
atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)
* (*this_out_grad));
}
}
} /* !anonymous namespace */
#ifdef __cplusplus
extern "C" {
#endif
void PrRoIPoolingForwardGpu(
hipStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_OUT top_data,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count) {
hipLaunchKernelGGL(( PrRoIPoolingForward), dim3(CUDA_NUM_BLOCKS(top_count)), dim3(CUDA_NUM_THREADS), 0, stream,
top_count, bottom_data, bottom_rois, top_data,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
void PrRoIPoolingBackwardGpu(
hipStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count, const int bottom_count) {
hipMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
hipLaunchKernelGGL(( PrRoIPoolingBackward), dim3(CUDA_NUM_BLOCKS(top_count)), dim3(CUDA_NUM_THREADS), 0, stream,
top_count, bottom_rois, top_diff, bottom_diff,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
void PrRoIPoolingCoorBackwardGpu(
hipStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count, const int bottom_count) {
hipMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
hipLaunchKernelGGL(( PrRoIPoolingCoorBackward), dim3(CUDA_NUM_BLOCKS(top_count)), dim3(CUDA_NUM_THREADS), 0, stream,
top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
} /* !extern "C" */ | 0bf48e3f079ceb539ebe1cc46c68e06982370d82.cu | #include "prroi_pooling_gpu_impl.cuh"
#include <cstdio>
#include <cfloat>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
#define CUDA_POST_KERNEL_CHECK \
do { \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); \
exit(-1); \
} \
} while(0)
#define CUDA_NUM_THREADS 512
namespace {
static int CUDA_NUM_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)
{
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
float retVal = overflow ? 0.0f : data[h * width + w];
return retVal;
}
__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){
dw = dw > 0 ? dw : -dw;
dh = dh > 0 ? dh : -dh;
return (1.0f - dh) * (1.0f - dw);
}
__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {
return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;
}
__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){
float retVal = 0.0f;
int h1 = floorf(h);
int w1 = floorf(w);
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h)+1;
w1 = floorf(w);
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h);
w1 = floorf(w)+1;
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
h1 = floorf(h)+1;
w1 = floorf(w)+1;
retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));
return retVal;
}
__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,
const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)
{
float alpha, beta, lim_alpha, lim_beta, tmp;
float sum_out = 0;
alpha = x0 - float(s_w);
beta = y0 - float(s_h);
lim_alpha = x1 - float(s_w);
lim_beta = y1 - float(s_h);
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;
alpha = x0 - float(s_w);
beta = float(e_h) - y1;
lim_alpha = x1 - float(s_w);
lim_beta = float(e_h) - y0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;
return sum_out;
}
__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)
{
bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);
if (!overflow)
atomicAdd(diff + h * width + w, top_diff * coeff);
}
__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,
const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)
{
float alpha, beta, lim_alpha, lim_beta, tmp;
alpha = x0 - float(s_w);
beta = y0 - float(s_h);
lim_alpha = x1 - float(s_w);
lim_beta = y1 - float(s_h);
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);
alpha = x0 - float(s_w);
beta = float(e_h) - y1;
lim_alpha = x1 - float(s_w);
lim_beta = float(e_h) - y0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);
alpha = float(e_w) - x1;
lim_alpha = float(e_w) - x0;
tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)
* (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);
PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);
}
__global__ void PrRoIPoolingForward(
const int nthreads,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_OUT top_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));
float roi_height = max(roi_end_h - roi_start_h, ((float)0.0));
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
float *this_out = top_data + index;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
if (win_size == 0) {
*this_out = 0;
return;
}
float sum_out = 0;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
for (int w_iter = s_w; w_iter < e_w; ++w_iter)
for (int h_iter = s_h; h_iter < e_h; ++h_iter)
sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
height, width);
*this_out = sum_out / win_size;
}
}
__global__ void PrRoIPoolingBackward(
const int nthreads,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, (float)0);
float roi_height = max(roi_end_h - roi_start_h, (float)0);
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_out_grad = top_diff + index;
float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
for (int w_iter = s_w; w_iter < e_w; ++w_iter)
for (int h_iter = s_h; h_iter < e_h; ++h_iter)
PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,
max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),
min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),
height, width);
}
}
__global__ void PrRoIPoolingCoorBackward(
const int nthreads,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const float spatial_scale) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
float roi_start_w = bottom_rois[1] * spatial_scale;
float roi_start_h = bottom_rois[2] * spatial_scale;
float roi_end_w = bottom_rois[3] * spatial_scale;
float roi_end_h = bottom_rois[4] * spatial_scale;
float roi_width = max(roi_end_w - roi_start_w, (float)0);
float roi_height = max(roi_end_h - roi_start_h, (float)0);
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
const float *this_out_grad = top_diff + index;
const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
const float *this_top_data = top_data + index;
float *this_data_grad = bottom_diff + n * 5;
float win_start_w = roi_start_w + bin_size_w * pw;
float win_start_h = roi_start_h + bin_size_h * ph;
float win_end_w = win_start_w + bin_size_w;
float win_end_h = win_start_h + bin_size_h;
float win_size = max(float(0.0), bin_size_w * bin_size_h);
float sum_out = win_size == float(0) ? float(0) : *this_out_grad / win_size;
// WARNING: to be discussed
if (sum_out == 0)
return;
int s_w, s_h, e_w, e_h;
s_w = floorf(win_start_w);
e_w = ceilf(win_end_w);
s_h = floorf(win_start_h);
e_h = ceilf(win_end_h);
float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;
for (int h_iter = s_h; h_iter < e_h; ++h_iter) {
g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
min(win_end_h, float(h_iter + 1)) - h_iter,
PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),
PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));
g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,
min(win_end_h, float(h_iter + 1)) - h_iter,
PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),
PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));
}
for (int w_iter = s_w; w_iter < e_w; ++w_iter) {
g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
min(win_end_w, float(w_iter + 1)) - w_iter,
PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),
PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));
g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,
min(win_end_w, float(w_iter + 1)) - w_iter,
PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),
PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));
}
float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);
float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);
float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);
float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);
partial_x1 = partial_x1 / win_size * spatial_scale;
partial_x2 = partial_x2 / win_size * spatial_scale;
partial_y1 = partial_y1 / win_size * spatial_scale;
partial_y2 = partial_y2 / win_size * spatial_scale;
// (b, x1, y1, x2, y2)
this_data_grad[0] = 0;
atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))
* (*this_out_grad));
atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))
* (*this_out_grad));
atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)
* (*this_out_grad));
atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)
* (*this_out_grad));
}
}
} /* !anonymous namespace */
#ifdef __cplusplus
extern "C" {
#endif
void PrRoIPoolingForwardGpu(
cudaStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_OUT top_data,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count) {
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
top_count, bottom_data, bottom_rois, top_data,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
void PrRoIPoolingBackwardGpu(
cudaStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count, const int bottom_count) {
cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
top_count, bottom_rois, top_diff, bottom_diff,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
void PrRoIPoolingCoorBackwardGpu(
cudaStream_t stream,
F_DEVPTR_IN bottom_data,
F_DEVPTR_IN bottom_rois,
F_DEVPTR_IN top_data,
F_DEVPTR_IN top_diff,
F_DEVPTR_OUT bottom_diff,
const int channels_, const int height_, const int width_,
const int pooled_height_, const int pooled_width_,
const float spatial_scale_,
const int top_count, const int bottom_count) {
cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);
PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(
top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,
channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);
CUDA_POST_KERNEL_CHECK;
}
} /* !extern "C" */ |
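// --- Editor's hedged usage sketch (not part of the original file pair above) ---
// One possible host-side call into PrRoIPoolingForwardGpu. The pooled size, the
// spatial scale, the use of the default stream, and the assumption that
// F_DEVPTR_IN / F_DEVPTR_OUT are plain float pointers are illustrative only.
static void prroi_forward_example(float *d_features, float *d_rois, float *d_output,
                                  int num_rois, int channels, int height, int width)
{
    const int pooled_h = 7, pooled_w = 7;       // typical detection-head output size (assumption)
    const float spatial_scale = 1.0f / 16.0f;   // feature-map stride of 16 (assumption)
    const int top_count = num_rois * channels * pooled_h * pooled_w;
    PrRoIPoolingForwardGpu(0 /* default stream */, d_features, d_rois, d_output,
                           channels, height, width, pooled_h, pooled_w,
                           spatial_scale, top_count);
}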
e402bd3ecb90bde52f21614bb677eb7a41d3954c.hip | // !!! This is a file automatically generated by hipify!!!
//Vector Addition using CUDA.
//Winter 2020
//High Performance Computing.
#include <string> //For stoi.
#include <iostream> //For stdout.
#include <cstdlib> //For random number generator.
#include <chrono> //For getting time.
#include <climits> //For maximum n.
#include <cmath>
#include "hip/hip_runtime.h" //For Windows support.
#include "device_launch_parameters.h"
//The type that is used for the calculations.
typedef int type;
//Define constants for min/max.
#define RANDOMIZE_MIN -10
#define RANDOMIZE_MAX 10
//Cuda calculator which will run in each thread.
__global__ void cuda_calculator(type* a, type* b, type* c, int num_calcs)
{
//Calculate the starting index.
int start_index = (threadIdx.x + blockIdx.x * blockDim.x) * num_calcs;
int end_index = start_index + num_calcs;
//Capture the start index as current.
int curr_index = start_index;
//Unroll the loop 5 times if necessary.
while(curr_index < end_index - 5)
{
//Precompute the offset indexes so each one is computed only once.
int idx1 = curr_index + 1, idx2 = curr_index + 2,
idx3 = curr_index + 3, idx4 = curr_index + 4;
//Perform the additions.
c[curr_index] = a[curr_index] + b[curr_index];
c[idx1] = a[idx1] + b[idx1];
c[idx2] = a[idx2] + b[idx2];
c[idx3] = a[idx3] + b[idx3];
c[idx4] = a[idx4] + b[idx4];
curr_index += 5; //Increase the index by five.
}
//After the unrolled is over, run a regular loop, to finish it.
while(curr_index < end_index)
{
c[curr_index] = a[curr_index] + b[curr_index];
curr_index++;
}
}
//Cuda addition which runs the cuda program.
int cuda_addition(type* a, type* b, type* c, int n, int blocks,
int threads, double times[3])
{
//Create pointers for the GPU memory allocation
type* cu_vec_a;
type* cu_vec_b;
type* cu_vec_c;
//Calculate the number of elements that each kernel will handle (round up).
int num_calcs = ::ceil((double) n / (((double) blocks) * ((double) threads)));
//Calculate the padding so the kernel can avoid bounds-check conditionals (see the worked example after this function).
int padding_size = (int)(num_calcs * blocks * threads) - n ;
//Allocate memory on the device for the arrays.
hipMalloc((void**) &cu_vec_a, sizeof(type) * (n + padding_size));
hipMalloc((void**) &cu_vec_b, sizeof(type) * (n + padding_size));
hipMalloc((void**) &cu_vec_c, sizeof(type) * (n + padding_size));
//Wait for the thread to finish execution.
hipDeviceSynchronize();
//Capture the beginning time before the data transfer (from host).
auto begin_transfer_to = std::chrono::high_resolution_clock::now();
//Copy the data, and the size from the main memory to VRAM.
hipMemcpy(cu_vec_a, a, ((int) sizeof(type)) * n, hipMemcpyHostToDevice);
hipMemcpy(cu_vec_b, b, ((int) sizeof(type)) * n, hipMemcpyHostToDevice);
//Wait for the thread to finish execution.
hipDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer data to the device
auto total_transfer_to = std::chrono::high_resolution_clock::now() - begin_transfer_to;
times[0] = std::chrono::duration<double> (total_transfer_to).count();
//Capture the beginning time before the calculations.
auto begin_calcs_only = std::chrono::high_resolution_clock::now();
//Launch the addition kernel on the device.
hipLaunchKernelGGL(( cuda_calculator), dim3(blocks), dim3(threads), 0, 0, cu_vec_a, cu_vec_b, cu_vec_c, num_calcs);
//Check if we got any errors.
if(hipGetLastError() != hipSuccess)
return EXIT_FAILURE;
//Wait for the thread to finish execution.
hipDeviceSynchronize();
//Calculate the total time in seconds that it took to calculate.
auto total_calcs_only = std::chrono::high_resolution_clock::now() - begin_calcs_only;
times[1] = std::chrono::duration<double> (total_calcs_only).count();
//Capture the beginning time before the calculations.
auto begin_transfer_from = std::chrono::high_resolution_clock::now();
//Copy the results back from Vram to main ram.
hipMemcpy(c, cu_vec_c, ((int) sizeof(type)) * n, hipMemcpyDeviceToHost);
//Wait for the thread to finish execution.
hipDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer back to host.
auto total_transfer_from = std::chrono::high_resolution_clock::now() - begin_transfer_from;
times[2] = std::chrono::duration<double> (total_transfer_from).count();
//Deallocate memory in the GPU.
hipFree(cu_vec_a);
hipFree(cu_vec_b);
hipFree(cu_vec_c);
//Wait for the thread to finish execution.
hipDeviceSynchronize();
return EXIT_SUCCESS;
}
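// Worked example for the sizing above (editor addition; the numbers are
// illustrative): with n = 1000, blocks = 4 and threads = 128 there are 512 kernel
// instances, so num_calcs = ceil(1000 / 512) = 2, the padded length is
// 2 * 4 * 128 = 1024, and padding_size = 1024 - 1000 = 24. The 24 extra slots only
// exist so every instance can process its full num_calcs elements without a
// bounds check.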
//Sequential addition function.
double seq_addition(type* a, type* b, type* c, int size)
{
//Capture the beginning time before the calculations.
auto begin = std::chrono::high_resolution_clock::now();
//Iterate over the vectors and add the elements.
for(int i = 0; i < size; i++)
c[i] = a[i] + b[i];
//Calculate and return the total time in seconds that it took to compute.
auto total = std::chrono::high_resolution_clock::now() - begin;
return std::chrono::duration<double>(total).count();
}
//Sequential subtraction function (used for residual matrix).
void seq_subtraction(type* a, type* b, type* c, int size)
{
//Iterate over the vectors and subtract the elements.
for(int i = 0; i < size; i++)
c[i] = a[i] - b[i];
}
//Returns false if first and second aren't equal, true otherwise.
bool are_equal(type* first, type* second, int size)
{
//Iterate over and return false if not equal.
for(int i = 0; i < size; i++)
if(first[i] != second[i])
return false;
//If we get here, they were equal.
return true;
}
//A function which randomizes the vector; by default it only uses values between -10 and 10
void randomize(type* vec, int size, int min = RANDOMIZE_MIN, int max = RANDOMIZE_MAX)
{
//Perform this to ensure the random number generation is truly random.
std::srand(std::chrono::system_clock::now().time_since_epoch().count());
//Iterate through, and generate random numbers for each index.
for(int i = 0; i < size; i++)
vec[i] = ((type) std::rand() %
(type) (RANDOMIZE_MAX * 2) + (type) RANDOMIZE_MIN) % RANDOMIZE_MAX ;
}
//Print the given vector to stdout.
void dump(type* vec, int size)
{
//Iterate through, and generate random numbers for each index.
for(int i = 0; i < size - 1; i++)
std::cout << std::scientific << vec[i] << " | " ;
//Print the last item in a different format and add a new line.
std::cout << std::scientific << vec[size - 1] << std::endl;
}
//A function which will be called when there is an error.
int error(std::string msg)
{
//Print the error message.
std::cout << "Error: " << msg << std::endl;
//Print the usage message.
std::cout << std::endl << "Usage Guide:" << std::endl
<< "\t* ./a.out <Size of Vectors> <Number of Blocks> <Number of Threads>"
<< " <Output Mode>" << std::endl << "\t* Output mode is either \'q\' "
<< "(quiet) or \'v\' (verbose)" << std::endl
<< "\t* Number of blocks and threads are for the GPU." << std::endl;
//Return exit failure for passing it back to the terminal.
return EXIT_FAILURE;
}
//Main method which parses the arguments, and runs the program.
int main(int argc, char** argv)
{
//Define values for parameters.
int n, blocks, threads;
bool verbose;
//Check for invalid number of args.
if(argc != 5)
return error("Invalid number of arguments.");
//Parse the arguments.
try
{
n = std::stoi(argv[1]);
blocks = std::stoi(argv[2]);
threads = std::stoi(argv[3]);
}
catch(...) //If we get here, there was an error in the arguments.
{
return error("Invalid arguments, could not parse.");
}
//Check the print mode.
if(std::string(argv[4]) == "q" || std::string(argv[4]) == "v")
//If the mode is valid, verbose is true exactly when it is set to 'v'.
verbose = (std::string(argv[4]) == "v");
else
//If we get here an invalid mode was passed.
return error("Invalid print mode.");
//Check for invalid threads / blocks / n sizes.
if(n < 1 || blocks < 1 || threads < 1)
return error("Invalid arguments. All parameters should be positive.");
//Check if we're gonna get overflow.
if(n > INT_MAX)
return error("Integer Overflow, please reduce N.");
//Allocate memory for the input vectors.
type* vec_a = new type[n];
type* vec_b = new type[n];
//Randomize the input vectors.
randomize(vec_a, n);
randomize(vec_b, n);
//Allocate output matrices for the sequential and cuda executions.
type* vec_c_seq = new type[n];
type* vec_c_cuda = new type[n];
//Perform the sequential addition.
double seq_time = seq_addition(vec_a, vec_b, vec_c_seq, n);
//Perform the cuda addition, and capture the timings.
double times[3];
int stat = cuda_addition(vec_a, vec_b, vec_c_cuda, n, blocks, threads, times);
//Check the status.
if(stat == EXIT_FAILURE)
return error("Failed to execute kernel.");
//Check if the cuda and sequential results are not equal (error).
if(!are_equal(vec_c_seq, vec_c_cuda, n))
{
std::cout << "Error: Output vectors were not equal." << std::endl
<< "ErrorInfo: N=" << n << " Blocks=" << blocks
<< " Threads=" << threads << std::endl;
}
//Print the timing results, and the input arguments.
std::cout << "[Cuda_Transfer_To_Device_Seconds]=" << std::scientific << times[0]
<< " [Cuda_Transfer_To_Host_Seconds]=" << std::scientific << times[2]
<< " [Cuda_Calculation_Time_Seconds]=" << std::scientific << times[1]
<< " [Sequential_Time_Seconds]=" << std::scientific << seq_time
<< " [N]=" << n << " [Blocks]=" << blocks
<< " [Threads]=" << threads
<< std::endl;
//Allocate memory for residual vector.
type* residual = new type[n];
//Check if we're in verbose output mode.
if(verbose)
{
//Calculate residual vector for sequential implementation vs cuda.
seq_subtraction(vec_c_seq, vec_c_cuda, residual, n);
//Print out the inputs, calculations and residual vector.
std::cout << std::endl << "Printing out the First Vector:" << std::endl;
dump(vec_a, n);
std::cout << "\nPrinting out the Second Vector:" << std::endl;
dump(vec_b, n);
std::cout << "\nPrinting out the Addition results (Sequential):" << std::endl;
dump(vec_c_seq, n);
std::cout << "\nPrinting out the Addition results (Cuda):" << std::endl;
dump(vec_c_cuda, n);
std::cout << "\nPrinting out the residual matrix (Seq - Cuda):" << std::endl;
dump(residual, n);
}
//Deallocate the memory in the heap.
	delete[] vec_a;
	delete[] vec_b;
	delete[] vec_c_seq;
	delete[] vec_c_cuda;
	delete[] residual;
return EXIT_SUCCESS;
}
| e402bd3ecb90bde52f21614bb677eb7a41d3954c.cu | //Vector Addition using CUDA.
//Winter 2020
//High Performance Computing.
#include <string> //For stoi.
#include <iostream> //For stdout.
#include <cstdlib> //For random number generator.
#include <chrono> //For getting time.
#include <climits> //For maximum n.
#include <cmath>
#include "cuda_runtime.h" //For Windows support.
#include "device_launch_parameters.h"
//The type that is used for the calculations.
typedef int type;
//Define constants for min/max.
#define RANDOMIZE_MIN -10
#define RANDOMIZE_MAX 10
//Cuda calculator which will run in each thread.
__global__ void cuda_calculator(type* a, type* b, type* c, int num_calcs)
{
//Calculate the starting index.
int start_index = (threadIdx.x + blockIdx.x * blockDim.x) * num_calcs;
int end_index = start_index + num_calcs;
//Capture the start index as current.
int curr_index = start_index;
	//Manually unroll the main loop by a factor of five.
	while(curr_index < end_index - 5)
	{
		//Precompute the offset indexes so each is calculated only once.
int idx1 = curr_index + 1, idx2 = curr_index + 2,
idx3 = curr_index + 3, idx4 = curr_index + 4;
//Perform the additions.
c[curr_index] = a[curr_index] + b[curr_index];
c[idx1] = a[idx1] + b[idx1];
c[idx2] = a[idx2] + b[idx2];
c[idx3] = a[idx3] + b[idx3];
c[idx4] = a[idx4] + b[idx4];
curr_index += 5; //Increase the index by five.
}
//After the unrolled is over, run a regular loop, to finish it.
while(curr_index < end_index)
{
c[curr_index] = a[curr_index] + b[curr_index];
curr_index++;
}
}
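//Worked example (added for illustration; the values are hypothetical): with
//blockDim.x = 256, num_calcs = 8, blockIdx.x = 2 and threadIdx.x = 10, the
//kernel above computes start_index = (10 + 2*256) * 8 = 4176 and
//end_index = 4184; the unrolled loop covers elements 4176-4180 and the
//trailing loop the remaining elements 4181-4183.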
//Cuda addition which runs the cuda program.
int cuda_addition(type* a, type* b, type* c, int n, int blocks,
int threads, double times[3])
{
//Create pointers for the GPU memory allocation
type* cu_vec_a;
type* cu_vec_b;
type* cu_vec_c;
	//Calculate the number of elements that each thread will handle (round up).
int num_calcs = std::ceil((double) n / (((double) blocks) * ((double) threads)));
	//Calculate the padding (so the kernel needs no bounds checks on the padded arrays).
int padding_size = (int)(num_calcs * blocks * threads) - n ;
//Allocate memory on the device for the arrays.
cudaMalloc((void**) &cu_vec_a, sizeof(type) * (n + padding_size));
cudaMalloc((void**) &cu_vec_b, sizeof(type) * (n + padding_size));
cudaMalloc((void**) &cu_vec_c, sizeof(type) * (n + padding_size));
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Capture the beginning time before the data transfer (from host).
auto begin_transfer_to = std::chrono::high_resolution_clock::now();
//Copy the data, and the size from the main memory to VRAM.
cudaMemcpy(cu_vec_a, a, ((int) sizeof(type)) * n, cudaMemcpyHostToDevice);
cudaMemcpy(cu_vec_b, b, ((int) sizeof(type)) * n, cudaMemcpyHostToDevice);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer data to the device
auto total_transfer_to = std::chrono::high_resolution_clock::now() - begin_transfer_to;
times[0] = std::chrono::duration<double> (total_transfer_to).count();
//Capture the beginning time before the calculations.
auto begin_calcs_only = std::chrono::high_resolution_clock::now();
//Launch the addition kernel on the device.
cuda_calculator<<<blocks, threads>>>(cu_vec_a, cu_vec_b, cu_vec_c, num_calcs);
//Check if we got any errors.
if(cudaGetLastError() != cudaSuccess)
return EXIT_FAILURE;
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to calculate.
auto total_calcs_only = std::chrono::high_resolution_clock::now() - begin_calcs_only;
times[1] = std::chrono::duration<double> (total_calcs_only).count();
//Capture the beginning time before the calculations.
auto begin_transfer_from = std::chrono::high_resolution_clock::now();
	//Copy the results back from VRAM to main RAM.
cudaMemcpy(c, cu_vec_c, ((int) sizeof(type)) * n, cudaMemcpyDeviceToHost);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer back to host.
auto total_transfer_from = std::chrono::high_resolution_clock::now() - begin_transfer_from;
times[2] = std::chrono::duration<double> (total_transfer_from).count();
//Deallocate memory in the GPU.
cudaFree(cu_vec_a);
cudaFree(cu_vec_b);
cudaFree(cu_vec_c);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
return EXIT_SUCCESS;
}
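//Sizing example (added for illustration; the numbers are hypothetical): for
//n = 10000, blocks = 4 and threads = 256 the code above yields
//num_calcs = ceil(10000 / 1024) = 10 and padding_size = 10*1024 - 10000 = 240,
//so every thread processes exactly num_calcs elements and the final 240 padded
//slots are scratch entries that are never copied back to the host.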
//Sequential addition function.
double seq_addition(type* a, type* b, type* c, int size)
{
//Capture the beginning time before the calculations.
auto begin = std::chrono::high_resolution_clock::now();
//Iterate over the vectors and add the elements.
for(int i = 0; i < size; i++)
c[i] = a[i] + b[i];
//Calculate and return the total time in seconds that it took to compute.
auto total = std::chrono::high_resolution_clock::now() - begin;
	return std::chrono::duration<double> (total).count();
}
//Sequential subtraction function (used for residual matrix).
void seq_subtraction(type* a, type* b, type* c, int size)
{
//Iterate over the vectors and subtract the elements.
for(int i = 0; i < size; i++)
c[i] = a[i] - b[i];
}
//Returns false if first and second aren't equal, true otherwise.
bool are_equal(type* first, type* second, int size)
{
//Iterate over and return false if not equal.
for(int i = 0; i < size; i++)
if(first[i] != second[i])
return false;
//If we get here, they were equal.
return true;
}
//A function which randomizes the vector; by default it only uses values between -10 and 10.
void randomize(type* vec, int size, int min = RANDOMIZE_MIN, int max = RANDOMIZE_MAX)
{
	//Seed the RNG with the current time so repeated runs produce different values.
	std::srand(static_cast<unsigned int>(std::chrono::system_clock::now().time_since_epoch().count()));
	//Iterate through, and generate a random number in [min, max] for each index.
	for(int i = 0; i < size; i++)
		vec[i] = (type) (std::rand() % (max - min + 1) + min);
}
//Print the given vector to stdout.
void dump(type* vec, int size)
{
	//Iterate through, and print each element followed by a separator.
for(int i = 0; i < size - 1; i++)
std::cout << std::scientific << vec[i] << " | " ;
//Print the last item in a different format and add a new line.
std::cout << std::scientific << vec[size - 1] << std::endl;
}
//A function which will be called when there is an error.
int error(std::string msg)
{
//Print the error message.
std::cout << "Error: " << msg << std::endl;
//Print the usage message.
std::cout << std::endl << "Usage Guide:" << std::endl
<< "\t* ./a.out <Size of Vectors> <Number of Blocks> <Number of Threads>"
<< " <Output Mode>" << std::endl << "\t* Output mode is either \'q\' "
<< "(quiet) or \'v\' (verbose)" << std::endl
<< "\t* Number of blocks and threads are for the GPU." << std::endl;
//Return exit failure for passing it back to the terminal.
return EXIT_FAILURE;
}
//Main method which parses the arguments, and runs the program.
int main(int argc, char** argv)
{
//Define values for parameters.
int n, blocks, threads;
bool verbose;
//Check for invalid number of args.
if(argc != 5)
return error("Invalid number of arguments.");
//Parse the arguments.
try
{
n = std::stoi(argv[1]);
blocks = std::stoi(argv[2]);
threads = std::stoi(argv[3]);
}
catch(...) //If we get here, there was an error in the arguments.
{
return error("Invalid arguments, could not parse.");
}
//Check the print mode.
if(std::string(argv[4]) == "q" || std::string(argv[4]) == "v")
//If the mode is valid and set to v, set verbose to true, false otherwise.
verbose = (std::string(argv[4]) == "v" ? true : false);
else
//If we get here an invalid mode was passed.
return error("Invalid print mode.");
//Check for invalid threads / blocks / n sizes.
if(n < 1 || blocks < 1 || threads < 1)
return error("Invalid arguments. All parameters should be positive.");
//Check if we're gonna get overflow.
if(n > INT_MAX)
return error("Integer Overflow, please reduce N.");
//Allocate memory for the input vectors.
type* vec_a = new type[n];
type* vec_b = new type[n];
//Randomize the input vectors.
randomize(vec_a, n);
randomize(vec_b, n);
	//Allocate output vectors for the sequential and cuda executions.
type* vec_c_seq = new type[n];
type* vec_c_cuda = new type[n];
//Perform the sequential addition.
double seq_time = seq_addition(vec_a, vec_b, vec_c_seq, n);
//Perform the cuda addition, and capture the timings.
double times[3];
int stat = cuda_addition(vec_a, vec_b, vec_c_cuda, n, blocks, threads, times);
//Check the status.
if(stat == EXIT_FAILURE)
return error("Failed to execute kernel.");
//Check if the cuda and sequential results are not equal (error).
if(!are_equal(vec_c_seq, vec_c_cuda, n))
{
std::cout << "Error: Output vectors were not equal." << std::endl
<< "ErrorInfo: N=" << n << " Blocks=" << blocks
<< " Threads=" << threads << std::endl;
}
//Print the timing results, and the input arguments.
std::cout << "[Cuda_Transfer_To_Device_Seconds]=" << std::scientific << times[0]
<< " [Cuda_Transfer_To_Host_Seconds]=" << std::scientific << times[2]
<< " [Cuda_Calculation_Time_Seconds]=" << std::scientific << times[1]
<< " [Sequential_Time_Seconds]=" << std::scientific << seq_time
<< " [N]=" << n << " [Blocks]=" << blocks
<< " [Threads]=" << threads
<< std::endl;
//Allocate memory for residual vector.
type* residual = new type[n];
//Check if we're in verbose output mode.
if(verbose)
{
//Calculate residual vector for sequential implementation vs cuda.
seq_subtraction(vec_c_seq, vec_c_cuda, residual, n);
//Print out the inputs, calculations and residual vector.
std::cout << std::endl << "Printing out the First Vector:" << std::endl;
dump(vec_a, n);
std::cout << "\nPrinting out the Second Vector:" << std::endl;
dump(vec_b, n);
std::cout << "\nPrinting out the Addition results (Sequential):" << std::endl;
dump(vec_c_seq, n);
std::cout << "\nPrinting out the Addition results (Cuda):" << std::endl;
dump(vec_c_cuda, n);
std::cout << "\nPrinting out the residual matrix (Seq - Cuda):" << std::endl;
dump(residual, n);
}
//Deallocate the memory in the heap.
	delete[] vec_a;
	delete[] vec_b;
	delete[] vec_c_seq;
	delete[] vec_c_cuda;
	delete[] residual;
return EXIT_SUCCESS;
}
|
0b4aafd763b44cc80a93bdbb7bf1fac1b1e0b31d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
using namespace std;
__global__ void cudamatmul(float *A, float *B, float *C, int N) {
int i = blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
float sum = 0.0f;
extern __shared__ float A_s[];
for (int ks=0; ks<N; ks+=blockDim.x) {
__syncthreads();
A_s[threadIdx.x] = A[N*i+ks+threadIdx.x];
__syncthreads();
for (int k=ks; k<ks+blockDim.x; k++) {
sum += A_s[k-ks] * B[N*k+j];
}
}
C[N*i+j] = sum;
}
void matmul(float *A, float *B, float *C, int N, int M){
dim3 grid(N/M, N);
auto tic = chrono::steady_clock::now();
hipLaunchKernelGGL(( cudamatmul), dim3(grid),dim3(M),M*sizeof(float), 0, A, B, C, N);
hipDeviceSynchronize();
auto toc = chrono::steady_clock::now();
double time = chrono::duration<double>(toc - tic).count();
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9);
}
void errorcalc(float *A, float *B, float *C, int N){
#pragma omp parallel for
for (int i=0; i<N; i++)
for (int k=0; k<N; k++)
for (int j=0; j<N; j++)
C[N*i+j] -= A[N*i+k] * B[N*k+j];
double err = 0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
err += fabs(C[N*i+j]);
printf("error: %lf\n",err/N/N);
}
int main(int argc, char **argv) {
int N = 2048;
int M = 1024;
if(argc==3){
N = atoi(argv[1]);
M = atoi(argv[2]);
}
int size = N * N * sizeof(float);
float *A, *B, *C;
hipMallocManaged(&A, size);
hipMallocManaged(&B, size);
hipMallocManaged(&C, size);
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
A[N*i+j] = drand48();
B[N*i+j] = drand48();
C[N*i+j] = 0;
}
}
matmul(A,B,C,N,M);
errorcalc(A,B,C,N);
hipFree(A);
hipFree(B);
hipFree(C);
}
| 0b4aafd763b44cc80a93bdbb7bf1fac1b1e0b31d.cu | #include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
using namespace std;
__global__ void cudamatmul(float *A, float *B, float *C, int N) {
int i = blockIdx.y;
int j = threadIdx.x + blockDim.x * blockIdx.x;
float sum = 0.0f;
extern __shared__ float A_s[];
for (int ks=0; ks<N; ks+=blockDim.x) {
__syncthreads();
A_s[threadIdx.x] = A[N*i+ks+threadIdx.x];
__syncthreads();
for (int k=ks; k<ks+blockDim.x; k++) {
sum += A_s[k-ks] * B[N*k+j];
}
}
C[N*i+j] = sum;
}
void matmul(float *A, float *B, float *C, int N, int M){
dim3 grid(N/M, N);
auto tic = chrono::steady_clock::now();
cudamatmul<<<grid,M,M*sizeof(float)>>>(A, B, C, N);
cudaDeviceSynchronize();
auto toc = chrono::steady_clock::now();
double time = chrono::duration<double>(toc - tic).count();
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9);
}
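// Launch-geometry note (added for illustration, not part of the original file):
// each block computes one row i of C (grid.y = N) for M consecutive columns
// (grid.x = N/M), and the dynamic shared buffer of M floats caches a chunk of
// row i of A that all M threads of the block reuse while streaming through B.
// With the defaults N = 2048 and M = 1024 this gives a 2x2048 grid of
// 1024-thread blocks and 4 KiB of shared memory per block; the integer
// division N/M assumes N is a multiple of M.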
void errorcalc(float *A, float *B, float *C, int N){
#pragma omp parallel for
for (int i=0; i<N; i++)
for (int k=0; k<N; k++)
for (int j=0; j<N; j++)
C[N*i+j] -= A[N*i+k] * B[N*k+j];
double err = 0;
for (int i=0; i<N; i++)
for (int j=0; j<N; j++)
err += fabs(C[N*i+j]);
printf("error: %lf\n",err/N/N);
}
int main(int argc, char **argv) {
int N = 2048;
int M = 1024;
if(argc==3){
N = atoi(argv[1]);
M = atoi(argv[2]);
}
int size = N * N * sizeof(float);
float *A, *B, *C;
cudaMallocManaged(&A, size);
cudaMallocManaged(&B, size);
cudaMallocManaged(&C, size);
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
A[N*i+j] = drand48();
B[N*i+j] = drand48();
C[N*i+j] = 0;
}
}
matmul(A,B,C,N,M);
errorcalc(A,B,C,N);
cudaFree(A);
cudaFree(B);
cudaFree(C);
}
|
30ac47e44884ec8df73f530b55e6387c4f13b35c.hip | // !!! This is a file automatically generated by hipify!!!
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "config.h"
#include "mesh_constants_cuda.h"
#ifdef USE_TEXTURES_FIELDS
realw_texture d_displ_tex;
realw_texture d_veloc_tex;
realw_texture d_accel_tex;
//backward/reconstructed
realw_texture d_b_displ_tex;
realw_texture d_b_veloc_tex;
realw_texture d_b_accel_tex;
//note: texture variables are implicitly static, and cannot be passed as arguments to cuda kernels;
// thus, we either 1) use if-statements (on FORWARD_OR_ADJOINT) to determine which texture to fetch from,
// or 2) use templates
// since if-statements are a bit slower, as the variable is only known at runtime, we use option 2)
// templates definitions
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_displ(int x);
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_veloc(int x);
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_accel(int x);
// templates for texture fetching
// FORWARD_OR_ADJOINT == 1 <- forward arrays
template<> __device__ float texfetch_displ<1>(int x) { return tex1Dfetch(d_displ_tex, x); }
template<> __device__ float texfetch_veloc<1>(int x) { return tex1Dfetch(d_veloc_tex, x); }
template<> __device__ float texfetch_accel<1>(int x) { return tex1Dfetch(d_accel_tex, x); }
// FORWARD_OR_ADJOINT == 3 <- backward/reconstructed arrays
template<> __device__ float texfetch_displ<3>(int x) { return tex1Dfetch(d_b_displ_tex, x); }
template<> __device__ float texfetch_veloc<3>(int x) { return tex1Dfetch(d_b_veloc_tex, x); }
template<> __device__ float texfetch_accel<3>(int x) { return tex1Dfetch(d_b_accel_tex, x); }
#endif
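// usage sketch (added illustration, not from the original sources): a kernel templated on
// FORWARD_OR_ADJOINT resolves the texture at compile time, e.g. along the lines of
//
//   template<int FORWARD_OR_ADJOINT> __global__ void read_displ(realw* out,int n){
//     int i = blockIdx.x*blockDim.x + threadIdx.x;
//     if( i < n ) out[i] = texfetch_displ<FORWARD_OR_ADJOINT>(i);
//   }
//
// so that read_displ<1> fetches from the forward field textures and read_displ<3>
// from the backward/reconstructed ones, without any runtime branching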
#ifdef USE_TEXTURES_CONSTANTS
realw_texture d_hprime_xx_tex;
#endif
/* ----------------------------------------------------------------------------------------------- */
// prepares a device array with all inter-element edge-nodes -- this
// is followed by a memcpy and MPI operations
__global__ void prepare_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
int num_interfaces_ext_mesh,
int max_nibool_interfaces_ext_mesh,
int* d_nibool_interfaces_ext_mesh,
int* d_ibool_interfaces_ext_mesh) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
int ientry,iglob;
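  // note (added): the send buffer groups entries by interface, i.e. interface "iinterface"
  // owns slots starting at iinterface*max_nibool_interfaces_ext_mesh, and each slot holds
  // the 3 acceleration components of one interface node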
for( int iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
if( id < d_nibool_interfaces_ext_mesh[iinterface] ) {
// entry in interface array
ientry = id + max_nibool_interfaces_ext_mesh*iinterface;
// global index in wavefield
iglob = d_ibool_interfaces_ext_mesh[ientry] - 1;
d_send_accel_buffer[3*ientry] = d_accel[3*iglob];
d_send_accel_buffer[3*ientry + 1 ] = d_accel[3*iglob + 1];
d_send_accel_buffer[3*ientry + 2 ] = d_accel[3*iglob + 2];
}
}
}
/* ----------------------------------------------------------------------------------------------- */
// prepares and transfers the inter-element edge-nodes to the host to be MPI'd
// (elements on boundary)
extern "C"
void FC_FUNC_(transfer_boun_accel_from_device,
TRANSFER_BOUN_ACCEL_FROM_DEVICE)(long* Mesh_pointer,
realw* accel,
realw* send_accel_buffer,
int* FORWARD_OR_ADJOINT){
TRACE("\ttransfer_boun_accel_from_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
// checks if anything to do
if( mp->size_mpi_buffer > 0 ){
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
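    // note (added): get_blocks_xy splits the 1D block count into a 2D grid, presumably to keep
    // the number of blocks in each grid dimension below the hardware limit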
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//timing for memory xfer
// hipEvent_t start, stop;
// realw time;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord( start, 0 );
if(*FORWARD_OR_ADJOINT == 1) {
hipLaunchKernelGGL(( prepare_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_accel,mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// synchronizes
//synchronize_cuda();
// explicitly waits until previous compute stream finishes
// (hipMemcpy implicitly synchronizes all other cuda operations)
hipStreamSynchronize(mp->compute_stream);
// copies buffer from GPU to CPU host
print_CUDA_error_if_any(hipMemcpy(send_accel_buffer,mp->d_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),hipMemcpyDeviceToHost),97001);
}
else if(*FORWARD_OR_ADJOINT == 3) {
hipLaunchKernelGGL(( prepare_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_accel,mp->d_b_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// synchronizes
//synchronize_cuda();
// explicitly waits until previous compute stream finishes
// (hipMemcpy implicitly synchronizes all other cuda operations)
hipStreamSynchronize(mp->compute_stream);
// copies buffer from GPU to CPU host
print_CUDA_error_if_any(hipMemcpy(send_accel_buffer,mp->d_b_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),hipMemcpyDeviceToHost),97002);
}
// finish timing of kernel+memcpy
// hipEventRecord( stop, 0 );
// hipEventSynchronize( stop );
// hipEventElapsedTime( &time, start, stop );
// hipEventDestroy( start );
// hipEventDestroy( stop );
// printf("boundary xfer d->h Time: %f ms\n",time);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("transfer_boun_accel_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(transfer_boundary_from_device_a,
TRANSFER_BOUNDARY_FROM_DEVICE_A)(long* Mesh_pointer,
int* nspec_outer_elastic) {
// asynchronous transfer from device to host
TRACE("\ttransfer_boundary_from_device_a");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
if( mp->size_mpi_buffer > 0 ){
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
hipLaunchKernelGGL(( prepare_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_accel,mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// waits until kernel is finished before starting async memcpy
//synchronize_cuda();
// waits until previous compute stream finishes
hipStreamSynchronize(mp->compute_stream);
hipMemcpyAsync(mp->h_send_accel_buffer,mp->d_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),hipMemcpyDeviceToHost,mp->copy_stream);
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(transfer_boundary_to_device_a,
TRANSFER_BOUNDARY_TO_DEVICE_A)(long* Mesh_pointer,
realw* buffer_recv_vector_ext_mesh,
int* num_interfaces_ext_mesh,
int* max_nibool_interfaces_ext_mesh) {
// asynchronous transfer from host to device
TRACE("transfer_boundary_to_device_a");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
if( mp->size_mpi_buffer > 0 ){
// copy on host memory
memcpy(mp->h_recv_accel_buffer,buffer_recv_vector_ext_mesh,mp->size_mpi_buffer*sizeof(realw));
// asynchronous copy to GPU using copy_stream
hipMemcpyAsync(mp->d_send_accel_buffer, buffer_recv_vector_ext_mesh,
mp->size_mpi_buffer*sizeof(realw),hipMemcpyHostToDevice,mp->copy_stream);
}
}
/* ----------------------------------------------------------------------------------------------- */
// Assembly
/* ----------------------------------------------------------------------------------------------- */
__global__ void assemble_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
int num_interfaces_ext_mesh,
int max_nibool_interfaces_ext_mesh,
int* d_nibool_interfaces_ext_mesh,
int* d_ibool_interfaces_ext_mesh) {
//int bx = blockIdx.y*gridDim.x+blockIdx.x;
//int tx = threadIdx.x;
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
int ientry,iglob;
for( int iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
if( id < d_nibool_interfaces_ext_mesh[iinterface] ) {
// entry in interface array
ientry = id + max_nibool_interfaces_ext_mesh*iinterface;
// global index in wavefield
iglob = d_ibool_interfaces_ext_mesh[ientry] - 1;
      // for testing atomic operations against non-atomic operations (0.1ms vs. 0.04 ms)
// d_accel[3*(iglob)] += d_send_accel_buffer[3*(ientry)];
// d_accel[3*(iglob)+1] += d_send_accel_buffer[3*(ientry)+1];
// d_accel[3*(iglob)+2] += d_send_accel_buffer[3*(ientry)+2];
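      // note (added): atomics are needed here since the same global node iglob can
      // (presumably) appear in the lists of several interfaces, so different threads
      // may accumulate into d_accel[3*iglob..3*iglob+2] concurrently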
atomicAdd(&d_accel[3*iglob],d_send_accel_buffer[3*ientry]);
atomicAdd(&d_accel[3*iglob + 1],d_send_accel_buffer[3*ientry + 1]);
atomicAdd(&d_accel[3*iglob + 2],d_send_accel_buffer[3*ientry + 2]);
}
}
// ! This step is done via previous function transfer_and_assemble...
// ! do iinterface = 1, num_interfaces_ext_mesh
// ! do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
// ! array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
// ! array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) + buffer_recv_vector_ext_mesh(:,ipoin,iinterface)
// ! enddo
// ! enddo
}
/* ----------------------------------------------------------------------------------------------- */
// FORWARD_OR_ADJOINT == 1 for accel, and == 3 for b_accel
extern "C"
void FC_FUNC_(transfer_asmbl_accel_to_device,
TRANSFER_ASMBL_ACCEL_TO_DEVICE)(long* Mesh_pointer, realw* accel,
realw* buffer_recv_vector_ext_mesh,
int* num_interfaces_ext_mesh,
int* max_nibool_interfaces_ext_mesh,
int* nibool_interfaces_ext_mesh,
int* ibool_interfaces_ext_mesh,
int* FORWARD_OR_ADJOINT) {
TRACE("\ttransfer_asmbl_accel_to_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
if( mp->size_mpi_buffer > 0 ){
//daniel: todo - check if this copy is only needed for adjoint simulation, otherwise it is called asynchronously?
if(*FORWARD_OR_ADJOINT == 1 ){
// Wait until previous copy stream finishes. We assemble while other compute kernels execute.
hipStreamSynchronize(mp->copy_stream);
}
else if(*FORWARD_OR_ADJOINT == 3 ){
// explicitly synchronizes
// (hipMemcpy implicitly synchronizes all other cuda operations)
synchronize_cuda();
print_CUDA_error_if_any(hipMemcpy(mp->d_b_send_accel_buffer, buffer_recv_vector_ext_mesh,
mp->size_mpi_buffer*sizeof(realw),hipMemcpyHostToDevice),97001);
}
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//double start_time = get_time();
// hipEvent_t start, stop;
// realw time;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord( start, 0 );
if(*FORWARD_OR_ADJOINT == 1) {
//assemble forward accel
hipLaunchKernelGGL(( assemble_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_accel, mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
}
else if(*FORWARD_OR_ADJOINT == 3) {
//assemble adjoint accel
hipLaunchKernelGGL(( assemble_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_accel, mp->d_b_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
}
// hipEventRecord( stop, 0 );
// hipEventSynchronize( stop );
// hipEventElapsedTime( &time, start, stop );
// hipEventDestroy( start );
// hipEventDestroy( stop );
// printf("Boundary Assemble Kernel Execution Time: %f ms\n",time);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("transfer_asmbl_accel_to_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
//daniel: not used ...
//
//extern "C"
//void FC_FUNC_(assemble_accel_on_device,
// ASSEMBLE_ACCEL_on_DEVICE)(long* Mesh_pointer, realw* accel,
// realw* buffer_recv_vector_ext_mesh,
// int* num_interfaces_ext_mesh,
// int* max_nibool_interfaces_ext_mesh,
// int* nibool_interfaces_ext_mesh,
// int* ibool_interfaces_ext_mesh,
// int* FORWARD_OR_ADJOINT) {
// TRACE("assemble_accel_on_device");
//
// Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
//
// int blocksize = BLOCKSIZE_TRANSFER;
// int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
//
// int num_blocks_x, num_blocks_y;
// get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
//
// //double start_time = get_time();
// dim3 grid(num_blocks_x,num_blocks_y);
// dim3 threads(blocksize,1,1);
// // hipEvent_t start, stop;
// // realw time;
// // hipEventCreate(&start);
// // hipEventCreate(&stop);
// // hipEventRecord( start, 0 );
//
//
// // ***************************************************************************
// // Wait until previous copy stream finishes. We assemble while other compute kernels execute.
// hipStreamSynchronize(mp->copy_stream);
//
// // Assembling on the copy_stream breaks the solution and it "blows up"
// if(*FORWARD_OR_ADJOINT == 1) { //assemble forward accel
// hipLaunchKernelGGL(( assemble_boundary_accel_on_device), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_accel, mp->d_send_accel_buffer,
// mp->num_interfaces_ext_mesh,
// mp->max_nibool_interfaces_ext_mesh,
// mp->d_nibool_interfaces_ext_mesh,
// mp->d_ibool_interfaces_ext_mesh);
// }
// else if(*FORWARD_OR_ADJOINT == 3) { //assemble adjoint accel
// assemble_boundary_accel_on_device<<<grid,threads,0,mp->copy_stream>>>(mp->d_b_accel, mp->d_send_accel_buffer,
// mp->num_interfaces_ext_mesh,
// mp->max_nibool_interfaces_ext_mesh,
// mp->d_nibool_interfaces_ext_mesh,
// mp->d_ibool_interfaces_ext_mesh);
// }
//
// // hipEventRecord( stop, 0 );
// // hipEventSynchronize( stop );
// // hipEventElapsedTime( &time, start, stop );
// // hipEventDestroy( start );
// // hipEventDestroy( stop );
// // printf("Boundary Assemble Kernel Execution Time: %f ms\n",time);
//#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
// //double end_time = get_time();
// //printf("Elapsed time: %e\n",end_time-start_time);
// exit_on_cuda_error("assemble_accel_on_device");
//#endif
//}
/* ----------------------------------------------------------------------------------------------- */
// KERNEL 2
/* ----------------------------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------------------------------- */
//__global__ void Kernel_test(realw* d_debug_output,int* d_phase_ispec_inner_elastic,
// int num_phase_ispec_elastic, int d_iphase, int* d_ibool) {
// int bx = blockIdx.x;
// int tx = threadIdx.x;
// int working_element;
// //int ispec;
// //int NGLL3_ALIGN = 128;
// if(tx==0 && bx==0) {
//
// d_debug_output[0] = 420.0;
//
// d_debug_output[2] = num_phase_ispec_elastic;
// d_debug_output[3] = d_iphase;
// working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
// d_debug_output[4] = working_element;
// d_debug_output[5] = d_phase_ispec_inner_elastic[0];
// /* d_debug_output[1] = d_ibool[working_element*NGLL3_ALIGN + tx]-1; */
// }
// /* d_debug_output[1+tx+128*bx] = 69.0; */
//
//}
/* ----------------------------------------------------------------------------------------------- */
// updates stress
__device__ void compute_element_att_stress(int tx,int working_element,int NSPEC,
realw* R_xx,realw* R_yy,realw* R_xy,
realw* R_xz,realw* R_yz,
realw* sigma_xx,realw* sigma_yy,realw* sigma_zz,
realw* sigma_xy,realw* sigma_xz,realw* sigma_yz) {
int i_sls,offset_sls;
realw R_xx_val,R_yy_val;
for(i_sls = 0; i_sls < N_SLS; i_sls++){
// index
offset_sls = tx + NGLL3*(working_element + NSPEC*i_sls);
R_xx_val = R_xx[offset_sls]; //(i,j,k,ispec,i_sls)
R_yy_val = R_yy[offset_sls];
*sigma_xx = *sigma_xx - R_xx_val;
*sigma_yy = *sigma_yy - R_yy_val;
*sigma_zz = *sigma_zz + R_xx_val + R_yy_val;
*sigma_xy = *sigma_xy - R_xy[offset_sls];
*sigma_xz = *sigma_xz - R_xz[offset_sls];
*sigma_yz = *sigma_yz - R_yz[offset_sls];
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// updates R_memory
__device__ void compute_element_att_memory(int tx,int working_element,int NSPEC,
realw* d_muv,
realw* factor_common,
realw* alphaval,realw* betaval,realw* gammaval,
realw* R_xx,realw* R_yy,realw* R_xy,realw* R_xz,realw* R_yz,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw epsilondev_xx_loc,realw epsilondev_yy_loc,realw epsilondev_xy_loc,
realw epsilondev_xz_loc,realw epsilondev_yz_loc
){
int i_sls;
int ijk_ispec;
int offset_sls,offset_align,offset_common;
realw mul;
realw alphaval_loc,betaval_loc,gammaval_loc;
realw factor_loc,Sn,Snp1;
// indices
offset_align = tx + NGLL3_PADDED * working_element;
ijk_ispec = tx + NGLL3 * working_element;
mul = d_muv[offset_align];
// use Runge-Kutta scheme to march in time
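  // added note: each component update below has the form
  //   R_new = alphaval_loc * R_old + betaval_loc * Sn + gammaval_loc * Snp1
  // with Sn = factor_loc * epsilondev(t_n) and Snp1 = factor_loc * epsilondev_loc(t_n+1),
  // applied independently to the xx, yy, xy, xz and yz components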
for(i_sls = 0; i_sls < N_SLS; i_sls++){
// indices
offset_common = i_sls + N_SLS*(tx + NGLL3*working_element); // (i_sls,i,j,k,ispec)
offset_sls = tx + NGLL3*(working_element + NSPEC*i_sls); // (i,j,k,ispec,i_sls)
factor_loc = mul * factor_common[offset_common]; //mustore(i,j,k,ispec) * factor_common(i_sls,i,j,k,ispec)
alphaval_loc = alphaval[i_sls]; // (i_sls)
betaval_loc = betaval[i_sls];
gammaval_loc = gammaval[i_sls];
// term in xx
Sn = factor_loc * epsilondev_xx[ijk_ispec]; //(i,j,k,ispec)
Snp1 = factor_loc * epsilondev_xx_loc; //(i,j,k)
//R_xx(i,j,k,ispec,i_sls) = alphaval_loc * R_xx(i,j,k,ispec,i_sls) +
// betaval_loc * Sn + gammaval_loc * Snp1;
R_xx[offset_sls] = alphaval_loc * R_xx[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in yy
Sn = factor_loc * epsilondev_yy[ijk_ispec];
Snp1 = factor_loc * epsilondev_yy_loc;
R_yy[offset_sls] = alphaval_loc * R_yy[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in zz not computed since zero trace
// term in xy
Sn = factor_loc * epsilondev_xy[ijk_ispec];
Snp1 = factor_loc * epsilondev_xy_loc;
R_xy[offset_sls] = alphaval_loc * R_xy[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in xz
Sn = factor_loc * epsilondev_xz[ijk_ispec];
Snp1 = factor_loc * epsilondev_xz_loc;
R_xz[offset_sls] = alphaval_loc * R_xz[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in yz
Sn = factor_loc * epsilondev_yz[ijk_ispec];
Snp1 = factor_loc * epsilondev_yz_loc;
R_yz[offset_sls] = alphaval_loc * R_yz[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// pre-computes gravity term
__device__ void compute_element_gravity(int tx,int working_element,
int* d_ibool,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube,
realw jacobianl,
realw* s_dummyx_loc,
realw* s_dummyy_loc,
realw* s_dummyz_loc,
realw* sigma_xx,
realw* sigma_yy,
realw* sigma_xz,
realw* sigma_yz,
realw* rho_s_H1,
realw* rho_s_H2,
realw* rho_s_H3){
int iglob;
realw minus_g,minus_dg;
realw rhol;
realw gzl; // gxl,gyl,
realw sx_l,sy_l,sz_l;
realw Hxxl,Hyyl,Hzzl; //,Hxyl,Hxzl,Hyzl;
realw factor;
// compute non-symmetric terms for gravity
// get g, rho and dg/dr=dg
iglob = d_ibool[working_element*NGLL3 + tx]-1;
minus_g = d_minus_g[iglob];
minus_dg = d_minus_deriv_gravity[iglob];
// Cartesian components of the gravitational acceleration
//gxl = 0.f;
//gyl = 0.f;
gzl = minus_g;
// Cartesian components of gradient of gravitational acceleration
// H = grad g
// assumes g only acts in negative z-direction
Hxxl = 0.f;
Hyyl = 0.f;
Hzzl = minus_dg;
//Hxyl = 0.f;
//Hxzl = 0.f;
//Hyzl = 0.f;
rhol = d_rhostore[working_element*NGLL3_PADDED + tx];
// get displacement and multiply by density to compute G tensor
// G = rho [ sg - (s * g) I ]
sx_l = rhol * s_dummyx_loc[tx]; // d_displ[iglob*3];
sy_l = rhol * s_dummyy_loc[tx]; // d_displ[iglob*3 + 1];
sz_l = rhol * s_dummyz_loc[tx]; // d_displ[iglob*3 + 2];
// compute G tensor from s . g and add to sigma (not symmetric)
//sigma_xx += sy_l*gyl + sz_l*gzl;
*sigma_xx += sz_l*gzl;
//sigma_yy += sx_l*gxl + sz_l*gzl;
*sigma_yy += sz_l*gzl;
//sigma_zz += sx_l*gxl + sy_l*gyl;
//sigma_xy -= sx_l*gyl;
//sigma_yx -= sy_l*gxl;
*sigma_xz -= sx_l*gzl;
//sigma_zx -= sz_l*gxl;
*sigma_yz -= sy_l*gzl;
//sigma_zy -= sz_l*gyl;
// precompute vector
factor = jacobianl * wgll_cube[tx];
//rho_s_H1 = fac1 * (sx_l * Hxxl + sy_l * Hxyl + sz_l * Hxzl);
//rho_s_H2 = fac1 * (sx_l * Hxyl + sy_l * Hyyl + sz_l * Hyzl);
//rho_s_H3 = fac1 * (sx_l * Hxzl + sy_l * Hyzl + sz_l * Hzzl);
// only non-zero z-direction
*rho_s_H1 = factor * sx_l * Hxxl ; // 0.f;
*rho_s_H2 = factor * sy_l * Hyyl ; // 0.f;
*rho_s_H3 = factor * sz_l * Hzzl ;
// debug
//*rho_s_H1 = 0.f;
//*rho_s_H2 = 0.f;
//*rho_s_H3 = 0.f ;
}
/* ----------------------------------------------------------------------------------------------- */
// KERNEL 2
//
// for elastic domains
/* ----------------------------------------------------------------------------------------------- */
/*
// unused
// original elastic kernel, please leave this code here for reference...
__global__ void Kernel_2_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw d_deltat,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
int COMPUTE_AND_STORE_STRAIN,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int ATTENUATION,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube){
int bx = blockIdx.y*gridDim.x + blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw tempx1l_att,tempx2l_att,tempx3l_att,tempy1l_att,tempy2l_att,tempy3l_att,tempz1l_att,tempz2l_att,tempz3l_att;
realw duxdxl_att,duxdyl_att,duxdzl_att,duydxl_att,duydyl_att,duydzl_att,duzdxl_att,duzdyl_att,duzdzl_att;
realw duxdyl_plus_duydxl_att,duzdxl_plus_duxdzl_att,duzdyl_plus_duydzl_att;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_dummyx_loc_att[NGLL3];
__shared__ realw s_dummyy_loc_att[NGLL3];
__shared__ realw s_dummyz_loc_att[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because we used memory padding from NGLL^3 = 125 to 128 to get coalescent memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3);
s_dummyy_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3 + 1);
s_dummyz_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3 + 2);
#else
// changing iglob indexing to match fortran row changes fast style
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
}
// JC JC here we will need to add GPU support for the new C-PML routines
if(ATTENUATION){
// use first order Taylor expansion of displacement for local storage of stresses
// at this current time step, to fix attenuation in a consistent way
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob);
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob + NGLOB);
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob + 2*NGLOB);
#else
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * d_veloc[iglob*3];
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * d_veloc[iglob*3 + 1];
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * d_veloc[iglob*3 + 2];
#endif
}
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = 0.f;
tempx2l_att = 0.f;
tempx3l_att = 0.f;
tempy1l_att = 0.f;
tempy2l_att = 0.f;
tempy3l_att = 0.f;
tempz1l_att = 0.f;
tempz2l_att = 0.f;
tempz3l_att = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l_att += s_dummyx_loc_att[offset]*hp1;
tempy1l_att += s_dummyy_loc_att[offset]*hp1;
tempz1l_att += s_dummyz_loc_att[offset]*hp1;
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l_att += s_dummyx_loc_att[offset]*hp2;
tempy2l_att += s_dummyy_loc_att[offset]*hp2;
tempz2l_att += s_dummyz_loc_att[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l_att += s_dummyx_loc_att[offset]*hp3;
tempy3l_att += s_dummyy_loc_att[offset]*hp3;
tempz3l_att += s_dummyz_loc_att[offset]*hp3;
}
}
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = s_dummyx_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l_att = s_dummyy_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l_att = s_dummyz_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l_att = s_dummyx_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l_att = s_dummyy_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l_att = s_dummyz_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l_att = s_dummyx_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l_att = s_dummyy_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l_att = s_dummyz_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
}
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save CPU time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
duxdxl_att = xixl*tempx1l_att + etaxl*tempx2l_att + gammaxl*tempx3l_att;
duxdyl_att = xiyl*tempx1l_att + etayl*tempx2l_att + gammayl*tempx3l_att;
duxdzl_att = xizl*tempx1l_att + etazl*tempx2l_att + gammazl*tempx3l_att;
duydxl_att = xixl*tempy1l_att + etaxl*tempy2l_att + gammaxl*tempy3l_att;
duydyl_att = xiyl*tempy1l_att + etayl*tempy2l_att + gammayl*tempy3l_att;
duydzl_att = xizl*tempy1l_att + etazl*tempy2l_att + gammazl*tempy3l_att;
duzdxl_att = xixl*tempz1l_att + etaxl*tempz2l_att + gammaxl*tempz3l_att;
duzdyl_att = xiyl*tempz1l_att + etayl*tempz2l_att + gammayl*tempz3l_att;
duzdzl_att = xizl*tempz1l_att + etazl*tempz2l_att + gammazl*tempz3l_att;
// precompute some sums to save CPU time
duxdyl_plus_duydxl_att = duxdyl_att + duydxl_att;
duzdxl_plus_duxdzl_att = duzdxl_att + duxdzl_att;
duzdyl_plus_duydzl_att = duzdyl_att + duydzl_att;
// computes deviatoric strain attenuation and/or for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl_att + duydyl_att + duzdzl_att); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl_att - templ;
epsilondev_yy_loc = duydyl_att - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl_att;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl_att;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl_att;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
// JC JC here we will need to add GPU support for the new C-PML routines
}
}else{
// computes deviatoric strain attenuation and/or for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl + duydyl + duzdzl); // 1./3. = 0.33333
// epsilondev_xx[offset] = duxdxl - templ;
// epsilondev_yy[offset] = duydyl - templ;
// epsilondev_xy[offset] = 0.5f * duxdyl_plus_duydxl;
// epsilondev_xz[offset] = 0.5f * duzdxl_plus_duxdzl;
// epsilondev_yz[offset] = 0.5f * duzdyl_plus_duydzl;
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl - templ;
epsilondev_yy_loc = duydyl - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
}
}
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// attenuation
if(ATTENUATION){
// use unrelaxed parameters if attenuation
mul = mul * one_minus_sum_beta[tx+working_element*NGLL3]; // (i,j,k,ispec)
}
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
if(ATTENUATION){
// subtracts memory variables if attenuation
compute_element_att_stress(tx,working_element,NSPEC,
R_xx,R_yy,R_xy,R_xz,R_yz,
&sigma_xx,&sigma_yy,&sigma_zz,&sigma_xy,&sigma_xz,&sigma_yz);
}
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = tex1Dfetch(d_accel_tex, iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = tex1Dfetch(d_accel_tex, iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = tex1Dfetch(d_accel_tex, iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = tex1Dfetch(d_accel_tex, iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = tex1Dfetch(d_accel_tex, iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = tex1Dfetch(d_accel_tex, iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}
else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// update memory variables based upon the Runge-Kutta scheme
if( ATTENUATION ){
compute_element_att_memory(tx,working_element,NSPEC,
d_muv,
factor_common,alphaval,betaval,gammaval,
R_xx,R_yy,R_xy,R_xz,R_yz,
epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz,
epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc);
}
// save deviatoric strain for Runge-Kutta scheme
if( COMPUTE_AND_STORE_STRAIN ){
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
}
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_impl()
*/
/* ----------------------------------------------------------------------------------------------- */
// note: kernel_2 is split into two kernels:
// - a kernel without attenuation Kernel_2_noatt_impl() and
// - a kernel including attenuation Kernel_2_att_impl()
// this separation should help with performance
// kernel without attenuation
//
// we use templates to distinguish between calls with forward or adjoint texture fields
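//
// a minimal sketch (an illustrative assumption, not necessarily the exact helper defined
// earlier in this file) of how the FORWARD_OR_ADJOINT template parameter can select the
// texture to read from at compile time, without any runtime branch:
//
//   template<int FORWARD_OR_ADJOINT> __device__ realw texfetch_displ(int i);
//   template<> __device__ realw texfetch_displ<1>(int i){ return tex1Dfetch(d_displ_tex, i); }   // forward field
//   template<> __device__ realw texfetch_displ<3>(int i){ return tex1Dfetch(d_b_displ_tex, i); } // backward/adjoint field
//
// (the texture names d_displ_tex / d_b_displ_tex are assumed here for illustration only)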
template<int FORWARD_OR_ADJOINT> __global__ void Kernel_2_noatt_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
int COMPUTE_AND_STORE_STRAIN,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube ){
// elastic compute kernel without attenuation
// holds for: ATTENUATION = .false.
// COMPUTE_AND_STORE_STRAIN = .true. or .false. (true for kernel simulations)
int bx = blockIdx.y*gridDim.x+blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
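// thread index decomposition: tx = I + J*NGLLX + K*NGLL2, i.e. the NGLL3 = 125 GLL points
// of the element are laid out with I varying fastest, then J, then K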
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because the arrays are padded from NGLL^3 = 125 to 128 entries to obtain coalesced memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
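// d_ibool uses Fortran 1-based numbering, hence the -1 to obtain a 0-based global index iglob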
// debug
//if( iglob < 0 || iglob >= NGLOB ){ printf("wrong iglob %d\n",iglob); }
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
// iglob indexing matches the Fortran column-major convention (leftmost index varies fastest)
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
}
// JC JC here we will need to add GPU support for the new C-PML routines
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
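// first matrix product: contracts the displacement stored in shared memory with the 1D GLL
// derivative matrix hprime along the local xi, eta and gamma directions, which yields
// du/dxi (temp*1l), du/deta (temp*2l) and du/dgamma (temp*3l) at this GLL point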
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save computation time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
// computes deviatoric strain for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl + duydyl + duzdzl); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl - templ;
epsilondev_yy_loc = duydyl - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
}
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
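// bulk modulus kappa = lambda + 2/3*mu, hence lambda + 2*mu = kappa + 4/3*mu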
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
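// second matrix product: contracts the stress terms stored in shared memory with the weighted
// derivative matrix hprimewgll (derivative matrix combined with the GLL quadrature weights),
// i.e. the stiffness integral of the weak form evaluated along each local direction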
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
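// the overall minus sign comes from the weak form: the stiffness contribution enters
// the right-hand side of M*accel = -K*displ + F with a negative sign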
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
// w/out atomic update
//d_accel[iglob*3] += sum_terms1;
//d_accel[iglob*3 + 1] += sum_terms2;
//d_accel[iglob*3 + 2] += sum_terms3;
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// save deviatoric strain for Runge-Kutta scheme
if( COMPUTE_AND_STORE_STRAIN ){
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
}
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_noatt_impl()
/* ----------------------------------------------------------------------------------------------- */
// kernel with attenuation
//
// we use templates to distinguish between calls with forward or adjoint texture fields
template<int FORWARD_OR_ADJOINT> __global__ void Kernel_2_att_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw d_deltat,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube){
// elastic compute kernel with attenuation
// holds for: ATTENUATION = .true.
// COMPUTE_AND_STORE_STRAIN = .true. (always true for attenuation)
int bx = blockIdx.y*gridDim.x+blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw tempx1l_att,tempx2l_att,tempx3l_att,tempy1l_att,tempy2l_att,tempy3l_att,tempz1l_att,tempz2l_att,tempz3l_att;
realw duxdxl_att,duxdyl_att,duxdzl_att,duydxl_att,duydyl_att,duydzl_att,duzdxl_att,duzdyl_att,duzdzl_att;
realw duxdyl_plus_duydxl_att,duzdxl_plus_duxdzl_att,duzdyl_plus_duydzl_att;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_dummyx_loc_att[NGLL3];
__shared__ realw s_dummyy_loc_att[NGLL3];
__shared__ realw s_dummyz_loc_att[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because the arrays are padded from NGLL^3 = 125 to 128 entries to obtain coalesced memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
// iglob indexing matches the Fortran column-major convention (leftmost index varies fastest)
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// use first order Taylor expansion of displacement for local storage of stresses
// at this current time step, to fix attenuation in a consistent way
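// i.e. u_att = u + deltat * v, a first-order predictor of the displacement used to
// evaluate the strains that feed the attenuation memory variables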
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * d_veloc[iglob*3];
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * d_veloc[iglob*3 + 1];
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * d_veloc[iglob*3 + 2];
#endif
}
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = 0.f;
tempx2l_att = 0.f;
tempx3l_att = 0.f;
tempy1l_att = 0.f;
tempy2l_att = 0.f;
tempy3l_att = 0.f;
tempz1l_att = 0.f;
tempz2l_att = 0.f;
tempz3l_att = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l_att += s_dummyx_loc_att[offset]*hp1;
tempy1l_att += s_dummyy_loc_att[offset]*hp1;
tempz1l_att += s_dummyz_loc_att[offset]*hp1;
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l_att += s_dummyx_loc_att[offset]*hp2;
tempy2l_att += s_dummyy_loc_att[offset]*hp2;
tempz2l_att += s_dummyz_loc_att[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l_att += s_dummyx_loc_att[offset]*hp3;
tempy3l_att += s_dummyy_loc_att[offset]*hp3;
tempz3l_att += s_dummyz_loc_att[offset]*hp3;
}
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = s_dummyx_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l_att = s_dummyy_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l_att = s_dummyz_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l_att = s_dummyx_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l_att = s_dummyy_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l_att = s_dummyz_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l_att = s_dummyx_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l_att = s_dummyy_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l_att = s_dummyz_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save computation time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
duxdxl_att = xixl*tempx1l_att + etaxl*tempx2l_att + gammaxl*tempx3l_att;
duxdyl_att = xiyl*tempx1l_att + etayl*tempx2l_att + gammayl*tempx3l_att;
duxdzl_att = xizl*tempx1l_att + etazl*tempx2l_att + gammazl*tempx3l_att;
duydxl_att = xixl*tempy1l_att + etaxl*tempy2l_att + gammaxl*tempy3l_att;
duydyl_att = xiyl*tempy1l_att + etayl*tempy2l_att + gammayl*tempy3l_att;
duydzl_att = xizl*tempy1l_att + etazl*tempy2l_att + gammazl*tempy3l_att;
duzdxl_att = xixl*tempz1l_att + etaxl*tempz2l_att + gammaxl*tempz3l_att;
duzdyl_att = xiyl*tempz1l_att + etayl*tempz2l_att + gammayl*tempz3l_att;
duzdzl_att = xizl*tempz1l_att + etazl*tempz2l_att + gammazl*tempz3l_att;
// precompute some sums to save computation time
duxdyl_plus_duydxl_att = duxdyl_att + duydxl_att;
duzdxl_plus_duxdzl_att = duzdxl_att + duxdzl_att;
duzdyl_plus_duydzl_att = duzdyl_att + duydzl_att;
// attenuation
// computes deviatoric strain for attenuation and/or kernel calculations
realw templ = 0.33333333333333333333f * (duxdxl_att + duydyl_att + duzdzl_att); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl_att - templ;
epsilondev_yy_loc = duydyl_att - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl_att;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl_att;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl_att;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// attenuation
// use unrelaxed parameters if attenuation
mul = mul * one_minus_sum_beta[tx+working_element*NGLL3]; // (i,j,k,ispec)
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
// attenuation
// subtracts memory variables if attenuation
compute_element_att_stress(tx,working_element,NSPEC,
R_xx,R_yy,R_xy,R_xz,R_yz,
&sigma_xx,&sigma_yy,&sigma_zz,&sigma_xy,&sigma_xz,&sigma_yz);
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}
else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
// w/out atomic update
//d_accel[iglob*3] += sum_terms1;
//d_accel[iglob*3 + 1] += sum_terms2;
//d_accel[iglob*3 + 2] += sum_terms3;
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// attenuation
// update memory variables based upon the Runge-Kutta scheme
compute_element_att_memory(tx,working_element,NSPEC,
d_muv,
factor_common,alphaval,betaval,gammaval,
R_xx,R_yy,R_xy,R_xz,R_yz,
epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz,
epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc);
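// (compute_element_att_memory, defined earlier in this file, is expected to update each memory
// variable roughly as R <- alphaval*R + betaval*Sn + gammaval*Snp1, with Sn/Snp1 built from
// factor_common and the old/new deviatoric strains -- the usual standard-linear-solid update;
// see its definition for the exact formula)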
// save deviatoric strain for Runge-Kutta scheme
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_att_impl()
/* ----------------------------------------------------------------------------------------------- */
void Kernel_2(int nb_blocks_to_compute,Mesh* mp,int d_iphase,realw d_deltat,
int COMPUTE_AND_STORE_STRAIN,
int ATTENUATION,int ANISOTROPY,
int* d_ibool,
realw* d_xix,realw* d_xiy,realw* d_xiz,
realw* d_etax,realw* d_etay,realw* d_etaz,
realw* d_gammax,realw* d_gammay,realw* d_gammaz,
realw* d_kappav,
realw* d_muv,
realw* d_epsilondev_xx,realw* d_epsilondev_yy,realw* d_epsilondev_xy,
realw* d_epsilondev_xz,realw* d_epsilondev_yz,
realw* d_epsilon_trace_over_3,
realw* d_one_minus_sum_beta,
realw* d_factor_common,
realw* d_R_xx,realw* d_R_yy,realw* d_R_xy,
realw* d_R_xz,realw* d_R_yz,
realw* d_b_epsilondev_xx,realw* d_b_epsilondev_yy,realw* d_b_epsilondev_xy,
realw* d_b_epsilondev_xz,realw* d_b_epsilondev_yz,
realw* d_b_epsilon_trace_over_3,
realw* d_b_R_xx,realw* d_b_R_yy,realw* d_b_R_xy,
realw* d_b_R_xz,realw* d_b_R_yz,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
realw* d_rhostore){
TRACE("\tKernel_2");
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("before kernel Kernel 2");
#endif
/* if the grid can handle the number of blocks, we let it be 1D */
/* grid_2_x = nb_elem_color; */
/* nb_elem_color is just how many blocks we are computing now */
int blocksize = NGLL3_PADDED;
int num_blocks_x, num_blocks_y;
get_blocks_xy(nb_blocks_to_compute,&num_blocks_x,&num_blocks_y);
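// get_blocks_xy() (defined elsewhere in this package) folds the 1D element count into a 2D grid
// once it exceeds the hardware limit on gridDim.x; a minimal sketch of the idea, assuming a
// 65535-block limit per grid dimension:
//
//   static void get_blocks_xy_sketch(int nblocks, int* nx, int* ny) {
//     *nx = nblocks; *ny = 1;
//     while (*nx > 65535) { *nx = (*nx + 1) / 2; *ny *= 2; }
//   }
//
// any excess blocks created by the rounding up are masked off inside the kernels by the
// "bx < nb_blocks_to_compute" test on the flattened block index bx = blockIdx.y*gridDim.x + blockIdx.x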
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// Cuda timing
// hipEvent_t start, stop;
// realw time;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// hipEventRecord( start, 0 );
if( ATTENUATION ){
// debug
//printf("Running Kernel_2 with attenuation\n");
// compute kernels with attenuation
// forward wavefields -> FORWARD_OR_ADJOINT == 1
hipLaunchKernelGGL(( Kernel_2_att_impl<1>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,
mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
d_deltat,
mp->d_displ,mp->d_veloc,mp->d_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
d_epsilondev_xx,d_epsilondev_yy,d_epsilondev_xy,
d_epsilondev_xz,d_epsilondev_yz,
d_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,
d_factor_common,
d_R_xx,d_R_yy,d_R_xy,d_R_xz,d_R_yz,
mp->d_alphaval,mp->d_betaval,mp->d_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube);
if(mp->simulation_type == 3) {
// backward/reconstructed wavefields -> FORWARD_OR_ADJOINT == 3
hipLaunchKernelGGL(( Kernel_2_att_impl<3>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,
mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
d_deltat,
mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
d_b_epsilondev_xx,d_b_epsilondev_yy,d_b_epsilondev_xy,
d_b_epsilondev_xz,d_b_epsilondev_yz,
d_b_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,
d_factor_common,
d_b_R_xx,d_b_R_yy,d_b_R_xy,d_b_R_xz,d_b_R_yz,
mp->d_b_alphaval,mp->d_b_betaval,mp->d_b_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube);
}
}else{
// debug
//printf("Running Kernel_2 without attenuation\n");
// compute kernels without attenuation
// forward wavefields -> FORWARD_OR_ADJOINT == 1
hipLaunchKernelGGL(( Kernel_2_noatt_impl<1>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
mp->d_displ,mp->d_veloc,mp->d_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
COMPUTE_AND_STORE_STRAIN,
d_epsilondev_xx,d_epsilondev_yy,d_epsilondev_xy,
d_epsilondev_xz,d_epsilondev_yz,
d_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,d_factor_common,
d_R_xx,d_R_yy,d_R_xy,d_R_xz,d_R_yz,
mp->d_alphaval,mp->d_betaval,mp->d_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube );
// backward/reconstructed wavefield
if(mp->simulation_type == 3) {
// backward/reconstructed wavefields -> FORWARD_OR_ADJOINT == 3
hipLaunchKernelGGL(( Kernel_2_noatt_impl<3>), dim3(grid),dim3(threads),0,mp->compute_stream, nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
COMPUTE_AND_STORE_STRAIN,
d_b_epsilondev_xx,d_b_epsilondev_yy,d_b_epsilondev_xy,
d_b_epsilondev_xz,d_b_epsilondev_yz,
d_b_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,d_factor_common,
d_b_R_xx,d_b_R_yy,d_b_R_xy,d_b_R_xz,d_b_R_yz,
mp->d_b_alphaval,mp->d_b_betaval,mp->d_b_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube );
}
}
// hipEventRecord( stop, 0 );
// hipEventSynchronize( stop );
// hipEventElapsedTime( &time, start, stop );
// hipEventDestroy( start );
// hipEventDestroy( stop );
// printf("Kernel2 Execution Time: %f ms\n",time);
// hipDeviceSynchronize(); //
// LOG("Kernel 2 finished"); //
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("Kernel_2_impl");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_forces_viscoelastic_cuda,
COMPUTE_FORCES_VISCOELASTIC_CUDA)(long* Mesh_pointer,
int* iphase,
realw* deltat,
int* nspec_outer_elastic,
int* nspec_inner_elastic,
int* COMPUTE_AND_STORE_STRAIN,
int* ATTENUATION,
int* ANISOTROPY) {
TRACE("\tcompute_forces_viscoelastic_cuda");
// EPIK_TRACER("compute_forces_viscoelastic_cuda");
//printf("Running compute_forces\n");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int num_elements;
if( *iphase == 1 )
num_elements = *nspec_outer_elastic;
else
num_elements = *nspec_inner_elastic;
// checks if anything to do
if( num_elements == 0 ) return;
// mesh coloring
if( mp->use_mesh_coloring_gpu ){
// note: array offsets require sorted arrays, such that e.g. ibool starts with elastic elements
//          followed by acoustic ones.
//          elastic elements themselves are ordered with outer elements first, then inner elements
int nb_colors,nb_blocks_to_compute;
int istart;
int offset,offset_nonpadded,offset_nonpadded_att2;
// sets up color loop
if( *iphase == 1 ){
// outer elements
nb_colors = mp->num_colors_outer_elastic;
istart = 0;
// array offsets
offset = 0;
offset_nonpadded = 0;
offset_nonpadded_att2 = 0;
}else{
// inner elements (start after outer elements)
nb_colors = mp->num_colors_outer_elastic + mp->num_colors_inner_elastic;
istart = mp->num_colors_outer_elastic;
// array offsets
offset = (*nspec_outer_elastic) * NGLL3_PADDED;
offset_nonpadded = (*nspec_outer_elastic) * NGLL3;
offset_nonpadded_att2 = (*nspec_outer_elastic) * NGLL3 * N_SLS;
}
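// example (illustrative): with two outer colors of n1 and n2 elements, the first Kernel_2 call
// in the color loop below uses offset = 0 and the second uses offset = n1*NGLL3_PADDED
// (and offset_nonpadded = n1*NGLL3), i.e. each color works on a contiguous slice of the sorted arrays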
// loops over colors
for(int icolor = istart; icolor < nb_colors; icolor++){
nb_blocks_to_compute = mp->h_num_elem_colors_elastic[icolor];
// checks
//if( nb_blocks_to_compute <= 0 ){
// printf("error number of elastic color blocks: %d -- color = %d \n",nb_blocks_to_compute,icolor);
// exit(EXIT_FAILURE);
//}
Kernel_2(nb_blocks_to_compute,mp,*iphase,*deltat,
*COMPUTE_AND_STORE_STRAIN,
*ATTENUATION,*ANISOTROPY,
mp->d_ibool + offset_nonpadded,
mp->d_xix + offset,mp->d_xiy + offset,mp->d_xiz + offset,
mp->d_etax + offset,mp->d_etay + offset,mp->d_etaz + offset,
mp->d_gammax + offset,mp->d_gammay + offset,mp->d_gammaz + offset,
mp->d_kappav + offset,
mp->d_muv + offset,
mp->d_epsilondev_xx + offset_nonpadded,mp->d_epsilondev_yy + offset_nonpadded,mp->d_epsilondev_xy + offset_nonpadded,
mp->d_epsilondev_xz + offset_nonpadded,mp->d_epsilondev_yz + offset_nonpadded,
mp->d_epsilon_trace_over_3 + offset_nonpadded,
mp->d_one_minus_sum_beta + offset_nonpadded,
mp->d_factor_common + offset_nonpadded_att2,
mp->d_R_xx + offset_nonpadded,mp->d_R_yy + offset_nonpadded,mp->d_R_xy + offset_nonpadded,
mp->d_R_xz + offset_nonpadded,mp->d_R_yz + offset_nonpadded,
mp->d_b_epsilondev_xx + offset_nonpadded,mp->d_b_epsilondev_yy + offset_nonpadded,mp->d_b_epsilondev_xy + offset_nonpadded,
mp->d_b_epsilondev_xz + offset_nonpadded,mp->d_b_epsilondev_yz + offset_nonpadded,
mp->d_b_epsilon_trace_over_3 + offset_nonpadded,
mp->d_b_R_xx + offset_nonpadded,mp->d_b_R_yy + offset_nonpadded,mp->d_b_R_xy + offset_nonpadded,
mp->d_b_R_xz + offset_nonpadded,mp->d_b_R_yz + offset_nonpadded,
mp->d_c11store + offset,mp->d_c12store + offset,mp->d_c13store + offset,
mp->d_c14store + offset,mp->d_c15store + offset,mp->d_c16store + offset,
mp->d_c22store + offset,mp->d_c23store + offset,mp->d_c24store + offset,
mp->d_c25store + offset,mp->d_c26store + offset,mp->d_c33store + offset,
mp->d_c34store + offset,mp->d_c35store + offset,mp->d_c36store + offset,
mp->d_c44store + offset,mp->d_c45store + offset,mp->d_c46store + offset,
mp->d_c55store + offset,mp->d_c56store + offset,mp->d_c66store + offset,
mp->d_rhostore + offset);
// for padded and aligned arrays
offset += nb_blocks_to_compute * NGLL3_PADDED;
// for no-aligned arrays
offset_nonpadded += nb_blocks_to_compute * NGLL3;
// for factor_common array
offset_nonpadded_att2 += nb_blocks_to_compute * NGLL3 * N_SLS;
//note: we use the same stream, so kernels are executed one after the other
// thus, no explicit synchronization should be needed to avoid race conditions, even when running on only 1 process
}
}else{
// no mesh coloring: uses atomic updates
Kernel_2(num_elements,mp,*iphase,*deltat,
*COMPUTE_AND_STORE_STRAIN,
*ATTENUATION,*ANISOTROPY,
mp->d_ibool,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->d_kappav,
mp->d_muv,
mp->d_epsilondev_xx,mp->d_epsilondev_yy,mp->d_epsilondev_xy,
mp->d_epsilondev_xz,mp->d_epsilondev_yz,
mp->d_epsilon_trace_over_3,
mp->d_one_minus_sum_beta,
mp->d_factor_common,
mp->d_R_xx,mp->d_R_yy,mp->d_R_xy,
mp->d_R_xz,mp->d_R_yz,
mp->d_b_epsilondev_xx,mp->d_b_epsilondev_yy,mp->d_b_epsilondev_xy,
mp->d_b_epsilondev_xz,mp->d_b_epsilondev_yz,
mp->d_b_epsilon_trace_over_3,
mp->d_b_R_xx,mp->d_b_R_yy,mp->d_b_R_xy,
mp->d_b_R_xz,mp->d_b_R_yz,
mp->d_c11store,mp->d_c12store,mp->d_c13store,
mp->d_c14store,mp->d_c15store,mp->d_c16store,
mp->d_c22store,mp->d_c23store,mp->d_c24store,
mp->d_c25store,mp->d_c26store,mp->d_c33store,
mp->d_c34store,mp->d_c35store,mp->d_c36store,
mp->d_c44store,mp->d_c45store,mp->d_c46store,
mp->d_c55store,mp->d_c56store,mp->d_c66store,
mp->d_rhostore);
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(sync_copy_from_device,
SYNC_copy_FROM_DEVICE)(long* Mesh_pointer,
int* iphase,
realw* send_buffer) {
TRACE("sync_copy_from_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
// Wait until async-memcpy of outer elements is finished and start MPI.
if( *iphase != 2 ){ exit_on_cuda_error("sync_copy_from_device must be called for iphase == 2"); }
if( mp->size_mpi_buffer > 0 ){
// waits for asynchronous copy to finish
hipStreamSynchronize(mp->copy_stream);
// There have been problems using pinned memory with MPI, so
// we copy the buffer into a non-pinned region.
memcpy(send_buffer,mp->h_send_accel_buffer,mp->size_mpi_buffer*sizeof(float));
}
// memory copy is now finished, so non-blocking MPI send can proceed
}
| 30ac47e44884ec8df73f530b55e6387c4f13b35c.cu | /*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 2 . 1
! ---------------------------------------
!
! Main authors: Dimitri Komatitsch and Jeroen Tromp
! Princeton University, USA and CNRS / INRIA / University of Pau
! (c) Princeton University / California Institute of Technology and CNRS / INRIA / University of Pau
! July 2012
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 2 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include <stdio.h>
#include <cuda.h>
#include <cublas.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "config.h"
#include "mesh_constants_cuda.h"
#ifdef USE_TEXTURES_FIELDS
realw_texture d_displ_tex;
realw_texture d_veloc_tex;
realw_texture d_accel_tex;
//backward/reconstructed
realw_texture d_b_displ_tex;
realw_texture d_b_veloc_tex;
realw_texture d_b_accel_tex;
//note: texture variables are implicitly static and cannot be passed as arguments to cuda kernels;
// thus, either 1) we use if-statements (on FORWARD_OR_ADJOINT) to determine which texture to fetch from,
// or 2) we use templates
// since if-statements are a bit slower (the variable is only known at runtime), we use option 2)
// templates definitions
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_displ(int x);
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_veloc(int x);
template<int FORWARD_OR_ADJOINT> __device__ float texfetch_accel(int x);
// templates for texture fetching
// FORWARD_OR_ADJOINT == 1 <- forward arrays
template<> __device__ float texfetch_displ<1>(int x) { return tex1Dfetch(d_displ_tex, x); }
template<> __device__ float texfetch_veloc<1>(int x) { return tex1Dfetch(d_veloc_tex, x); }
template<> __device__ float texfetch_accel<1>(int x) { return tex1Dfetch(d_accel_tex, x); }
// FORWARD_OR_ADJOINT == 3 <- backward/reconstructed arrays
template<> __device__ float texfetch_displ<3>(int x) { return tex1Dfetch(d_b_displ_tex, x); }
template<> __device__ float texfetch_veloc<3>(int x) { return tex1Dfetch(d_b_veloc_tex, x); }
template<> __device__ float texfetch_accel<3>(int x) { return tex1Dfetch(d_b_accel_tex, x); }
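// usage note (assumed): a kernel templated on FORWARD_OR_ADJOINT can then read the matching field via
// e.g. texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 0/1/2), avoiding any runtime branch on which
// texture to fetch from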
#endif
#ifdef USE_TEXTURES_CONSTANTS
realw_texture d_hprime_xx_tex;
#endif
/* ----------------------------------------------------------------------------------------------- */
// prepares a device array with all inter-element edge-nodes -- this
// is followed by a memcpy and MPI operations
__global__ void prepare_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
int num_interfaces_ext_mesh,
int max_nibool_interfaces_ext_mesh,
int* d_nibool_interfaces_ext_mesh,
int* d_ibool_interfaces_ext_mesh) {
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
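// id flattens the (blockIdx.x,blockIdx.y) grid into a single point index,
// covering up to gridDim.x*gridDim.y*blockDim.x interface points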
int ientry,iglob;
for( int iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
if( id < d_nibool_interfaces_ext_mesh[iinterface] ) {
// entry in interface array
ientry = id + max_nibool_interfaces_ext_mesh*iinterface;
// global index in wavefield
iglob = d_ibool_interfaces_ext_mesh[ientry] - 1;
d_send_accel_buffer[3*ientry] = d_accel[3*iglob];
d_send_accel_buffer[3*ientry + 1 ] = d_accel[3*iglob + 1];
d_send_accel_buffer[3*ientry + 2 ] = d_accel[3*iglob + 2];
}
}
}
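// resulting buffer layout: 3 consecutive values (x,y,z) per interface entry, each interface occupying
// a slot of max_nibool_interfaces_ext_mesh entries; entries beyond nibool of a given interface are left untouched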
/* ----------------------------------------------------------------------------------------------- */
// prepares and transfers the inter-element edge-nodes to the host to be MPI'd
// (elements on boundary)
extern "C"
void FC_FUNC_(transfer_boun_accel_from_device,
TRANSFER_BOUN_ACCEL_FROM_DEVICE)(long* Mesh_pointer,
realw* accel,
realw* send_accel_buffer,
int* FORWARD_OR_ADJOINT){
TRACE("\ttransfer_boun_accel_from_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
// checks if anything to do
if( mp->size_mpi_buffer > 0 ){
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
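// worked example (illustrative values): if max_nibool_interfaces_ext_mesh were 1000 and BLOCKSIZE_TRANSFER
// were 256, then size_padded = ceil(1000/256)*256 = 1024, i.e. 4 blocks of 256 threads; the surplus
// threads are masked out in the kernel by the nibool check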
//timing for memory xfer
// cudaEvent_t start, stop;
// realw time;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord( start, 0 );
if(*FORWARD_OR_ADJOINT == 1) {
prepare_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_accel,mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// synchronizes
//synchronize_cuda();
// explicitly waits until previous compute stream finishes
// (cudaMemcpy implicitly synchronizes all other cuda operations)
cudaStreamSynchronize(mp->compute_stream);
// copies buffer from GPU to CPU host
print_CUDA_error_if_any(cudaMemcpy(send_accel_buffer,mp->d_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),cudaMemcpyDeviceToHost),97001);
}
else if(*FORWARD_OR_ADJOINT == 3) {
prepare_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_accel,mp->d_b_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// synchronizes
//synchronize_cuda();
// explicitly waits until previous compute stream finishes
// (cudaMemcpy implicitly synchronizes all other cuda operations)
cudaStreamSynchronize(mp->compute_stream);
// copies buffer from GPU to CPU host
print_CUDA_error_if_any(cudaMemcpy(send_accel_buffer,mp->d_b_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),cudaMemcpyDeviceToHost),97002);
}
// finish timing of kernel+memcpy
// cudaEventRecord( stop, 0 );
// cudaEventSynchronize( stop );
// cudaEventElapsedTime( &time, start, stop );
// cudaEventDestroy( start );
// cudaEventDestroy( stop );
// printf("boundary xfer d->h Time: %f ms\n",time);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("transfer_boun_accel_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(transfer_boundary_from_device_a,
TRANSFER_BOUNDARY_FROM_DEVICE_A)(long* Mesh_pointer,
int* nspec_outer_elastic) {
// asynchronous transfer from device to host
TRACE("\ttransfer_boundary_from_device_a");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
if( mp->size_mpi_buffer > 0 ){
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
prepare_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_accel,mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
// waits until kernel is finished before starting async memcpy
//synchronize_cuda();
// waits until previous compute stream finishes
cudaStreamSynchronize(mp->compute_stream);
cudaMemcpyAsync(mp->h_send_accel_buffer,mp->d_send_accel_buffer,
mp->size_mpi_buffer*sizeof(realw),cudaMemcpyDeviceToHost,mp->copy_stream);
}
}
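// note: the kernel above runs on the compute_stream while the device-to-host copy uses the copy_stream;
// the intent (consistent with the comments elsewhere in this file) is to overlap this boundary transfer
// with the remaining computation before MPI communication starts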
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(transfer_boundary_to_device_a,
TRANSFER_BOUNDARY_TO_DEVICE_A)(long* Mesh_pointer,
realw* buffer_recv_vector_ext_mesh,
int* num_interfaces_ext_mesh,
int* max_nibool_interfaces_ext_mesh) {
// asynchronous transfer from host to device
TRACE("transfer_boundary_to_device_a");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
if( mp->size_mpi_buffer > 0 ){
// copy on host memory
memcpy(mp->h_recv_accel_buffer,buffer_recv_vector_ext_mesh,mp->size_mpi_buffer*sizeof(realw));
// asynchronous copy to GPU using copy_stream
cudaMemcpyAsync(mp->d_send_accel_buffer, buffer_recv_vector_ext_mesh,
mp->size_mpi_buffer*sizeof(realw),cudaMemcpyHostToDevice,mp->copy_stream);
}
}
/* ----------------------------------------------------------------------------------------------- */
// Assembly
/* ----------------------------------------------------------------------------------------------- */
__global__ void assemble_boundary_accel_on_device(realw* d_accel, realw* d_send_accel_buffer,
int num_interfaces_ext_mesh,
int max_nibool_interfaces_ext_mesh,
int* d_nibool_interfaces_ext_mesh,
int* d_ibool_interfaces_ext_mesh) {
//int bx = blockIdx.y*gridDim.x+blockIdx.x;
//int tx = threadIdx.x;
int id = threadIdx.x + blockIdx.x*blockDim.x + blockIdx.y*gridDim.x*blockDim.x;
int ientry,iglob;
for( int iinterface=0; iinterface < num_interfaces_ext_mesh; iinterface++) {
if( id < d_nibool_interfaces_ext_mesh[iinterface] ) {
// entry in interface array
ientry = id + max_nibool_interfaces_ext_mesh*iinterface;
// global index in wavefield
iglob = d_ibool_interfaces_ext_mesh[ientry] - 1;
// for testing atomic operations against non-atomic operations (0.1 ms vs. 0.04 ms)
// d_accel[3*(iglob)] += d_send_accel_buffer[3*(ientry)];
// d_accel[3*(iglob)+1] += d_send_accel_buffer[3*(ientry)+1];
// d_accel[3*(iglob)+2] += d_send_accel_buffer[3*(ientry)+2];
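// atomic updates are needed because a global node shared by several MPI interfaces (edge/corner nodes)
// can appear as more than one (id,iinterface) entry and may then be updated concurrently by different threads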
atomicAdd(&d_accel[3*iglob],d_send_accel_buffer[3*ientry]);
atomicAdd(&d_accel[3*iglob + 1],d_send_accel_buffer[3*ientry + 1]);
atomicAdd(&d_accel[3*iglob + 2],d_send_accel_buffer[3*ientry + 2]);
}
}
// ! This step is done via previous function transfer_and_assemble...
// ! do iinterface = 1, num_interfaces_ext_mesh
// ! do ipoin = 1, nibool_interfaces_ext_mesh(iinterface)
// ! array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) = &
// ! array_val(:,ibool_interfaces_ext_mesh(ipoin,iinterface)) + buffer_recv_vector_ext_mesh(:,ipoin,iinterface)
// ! enddo
// ! enddo
}
/* ----------------------------------------------------------------------------------------------- */
// FORWARD_OR_ADJOINT == 1 for accel, and == 3 for b_accel
extern "C"
void FC_FUNC_(transfer_asmbl_accel_to_device,
TRANSFER_ASMBL_ACCEL_TO_DEVICE)(long* Mesh_pointer, realw* accel,
realw* buffer_recv_vector_ext_mesh,
int* num_interfaces_ext_mesh,
int* max_nibool_interfaces_ext_mesh,
int* nibool_interfaces_ext_mesh,
int* ibool_interfaces_ext_mesh,
int* FORWARD_OR_ADJOINT) {
TRACE("\ttransfer_asmbl_accel_to_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
if( mp->size_mpi_buffer > 0 ){
//daniel: todo - check if this copy is only needed for adjoint simulation, otherwise it is called asynchronously?
if(*FORWARD_OR_ADJOINT == 1 ){
// Wait until previous copy stream finishes. We assemble while other compute kernels execute.
cudaStreamSynchronize(mp->copy_stream);
}
else if(*FORWARD_OR_ADJOINT == 3 ){
// explicitly synchronizes
// (cudaMemcpy implicitly synchronizes all other cuda operations)
synchronize_cuda();
print_CUDA_error_if_any(cudaMemcpy(mp->d_b_send_accel_buffer, buffer_recv_vector_ext_mesh,
mp->size_mpi_buffer*sizeof(realw),cudaMemcpyHostToDevice),97001);
}
int blocksize = BLOCKSIZE_TRANSFER;
int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//double start_time = get_time();
// cudaEvent_t start, stop;
// realw time;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord( start, 0 );
if(*FORWARD_OR_ADJOINT == 1) {
//assemble forward accel
assemble_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_accel, mp->d_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
}
else if(*FORWARD_OR_ADJOINT == 3) {
//assemble adjoint accel
assemble_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_accel, mp->d_b_send_accel_buffer,
mp->num_interfaces_ext_mesh,
mp->max_nibool_interfaces_ext_mesh,
mp->d_nibool_interfaces_ext_mesh,
mp->d_ibool_interfaces_ext_mesh);
}
// cudaEventRecord( stop, 0 );
// cudaEventSynchronize( stop );
// cudaEventElapsedTime( &time, start, stop );
// cudaEventDestroy( start );
// cudaEventDestroy( stop );
// printf("Boundary Assemble Kernel Execution Time: %f ms\n",time);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("transfer_asmbl_accel_to_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
//daniel: not used ...
//
//extern "C"
//void FC_FUNC_(assemble_accel_on_device,
// ASSEMBLE_ACCEL_on_DEVICE)(long* Mesh_pointer, realw* accel,
// realw* buffer_recv_vector_ext_mesh,
// int* num_interfaces_ext_mesh,
// int* max_nibool_interfaces_ext_mesh,
// int* nibool_interfaces_ext_mesh,
// int* ibool_interfaces_ext_mesh,
// int* FORWARD_OR_ADJOINT) {
// TRACE("assemble_accel_on_device");
//
// Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
//
// int blocksize = BLOCKSIZE_TRANSFER;
// int size_padded = ((int)ceil(((double)mp->max_nibool_interfaces_ext_mesh)/((double)blocksize)))*blocksize;
//
// int num_blocks_x, num_blocks_y;
// get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
//
// //double start_time = get_time();
// dim3 grid(num_blocks_x,num_blocks_y);
// dim3 threads(blocksize,1,1);
// // cudaEvent_t start, stop;
// // realw time;
// // cudaEventCreate(&start);
// // cudaEventCreate(&stop);
// // cudaEventRecord( start, 0 );
//
//
// // ***************************************************************************
// // Wait until previous copy stream finishes. We assemble while other compute kernels execute.
// cudaStreamSynchronize(mp->copy_stream);
//
// // Assembling on the copy_stream breaks the solution and it "blows up"
// if(*FORWARD_OR_ADJOINT == 1) { //assemble forward accel
// assemble_boundary_accel_on_device<<<grid,threads,0,mp->compute_stream>>>(mp->d_accel, mp->d_send_accel_buffer,
// mp->num_interfaces_ext_mesh,
// mp->max_nibool_interfaces_ext_mesh,
// mp->d_nibool_interfaces_ext_mesh,
// mp->d_ibool_interfaces_ext_mesh);
// }
// else if(*FORWARD_OR_ADJOINT == 3) { //assemble adjoint accel
// assemble_boundary_accel_on_device<<<grid,threads,0,mp->copy_stream>>>(mp->d_b_accel, mp->d_send_accel_buffer,
// mp->num_interfaces_ext_mesh,
// mp->max_nibool_interfaces_ext_mesh,
// mp->d_nibool_interfaces_ext_mesh,
// mp->d_ibool_interfaces_ext_mesh);
// }
//
// // cudaEventRecord( stop, 0 );
// // cudaEventSynchronize( stop );
// // cudaEventElapsedTime( &time, start, stop );
// // cudaEventDestroy( start );
// // cudaEventDestroy( stop );
// // printf("Boundary Assemble Kernel Execution Time: %f ms\n",time);
//#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
// //double end_time = get_time();
// //printf("Elapsed time: %e\n",end_time-start_time);
// exit_on_cuda_error("assemble_accel_on_device");
//#endif
//}
/* ----------------------------------------------------------------------------------------------- */
// KERNEL 2
/* ----------------------------------------------------------------------------------------------- */
/* ----------------------------------------------------------------------------------------------- */
//__global__ void Kernel_test(realw* d_debug_output,int* d_phase_ispec_inner_elastic,
// int num_phase_ispec_elastic, int d_iphase, int* d_ibool) {
// int bx = blockIdx.x;
// int tx = threadIdx.x;
// int working_element;
// //int ispec;
// //int NGLL3_ALIGN = 128;
// if(tx==0 && bx==0) {
//
// d_debug_output[0] = 420.0;
//
// d_debug_output[2] = num_phase_ispec_elastic;
// d_debug_output[3] = d_iphase;
// working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
// d_debug_output[4] = working_element;
// d_debug_output[5] = d_phase_ispec_inner_elastic[0];
// /* d_debug_output[1] = d_ibool[working_element*NGLL3_ALIGN + tx]-1; */
// }
// /* d_debug_output[1+tx+128*bx] = 69.0; */
//
//}
/* ----------------------------------------------------------------------------------------------- */
// updates stress
__device__ void compute_element_att_stress(int tx,int working_element,int NSPEC,
realw* R_xx,realw* R_yy,realw* R_xy,
realw* R_xz,realw* R_yz,
realw* sigma_xx,realw* sigma_yy,realw* sigma_zz,
realw* sigma_xy,realw* sigma_xz,realw* sigma_yz) {
int i_sls,offset_sls;
realw R_xx_val,R_yy_val;
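// note: the memory variables are deviatoric (traceless), so R_zz = -(R_xx + R_yy);
// this is why sigma_zz below adds R_xx_val + R_yy_val instead of subtracting a stored R_zz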
for(i_sls = 0; i_sls < N_SLS; i_sls++){
// index
offset_sls = tx + NGLL3*(working_element + NSPEC*i_sls);
R_xx_val = R_xx[offset_sls]; //(i,j,k,ispec,i_sls)
R_yy_val = R_yy[offset_sls];
*sigma_xx = *sigma_xx - R_xx_val;
*sigma_yy = *sigma_yy - R_yy_val;
*sigma_zz = *sigma_zz + R_xx_val + R_yy_val;
*sigma_xy = *sigma_xy - R_xy[offset_sls];
*sigma_xz = *sigma_xz - R_xz[offset_sls];
*sigma_yz = *sigma_yz - R_yz[offset_sls];
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// updates R_memory
__device__ void compute_element_att_memory(int tx,int working_element,int NSPEC,
realw* d_muv,
realw* factor_common,
realw* alphaval,realw* betaval,realw* gammaval,
realw* R_xx,realw* R_yy,realw* R_xy,realw* R_xz,realw* R_yz,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw epsilondev_xx_loc,realw epsilondev_yy_loc,realw epsilondev_xy_loc,
realw epsilondev_xz_loc,realw epsilondev_yz_loc
){
int i_sls;
int ijk_ispec;
int offset_sls,offset_align,offset_common;
realw mul;
realw alphaval_loc,betaval_loc,gammaval_loc;
realw factor_loc,Sn,Snp1;
// indices
offset_align = tx + NGLL3_PADDED * working_element;
ijk_ispec = tx + NGLL3 * working_element;
mul = d_muv[offset_align];
// use Runge-Kutta scheme to march in time
for(i_sls = 0; i_sls < N_SLS; i_sls++){
// indices
offset_common = i_sls + N_SLS*(tx + NGLL3*working_element); // (i_sls,i,j,k,ispec)
offset_sls = tx + NGLL3*(working_element + NSPEC*i_sls); // (i,j,k,ispec,i_sls)
factor_loc = mul * factor_common[offset_common]; //mustore(i,j,k,ispec) * factor_common(i_sls,i,j,k,ispec)
alphaval_loc = alphaval[i_sls]; // (i_sls)
betaval_loc = betaval[i_sls];
gammaval_loc = gammaval[i_sls];
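// each memory variable is updated as R_new = alphaval_loc*R_old + betaval_loc*Sn + gammaval_loc*Snp1,
// with Sn built from the strain stored at the previous time step and Snp1 from the local strain of this step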
// term in xx
Sn = factor_loc * epsilondev_xx[ijk_ispec]; //(i,j,k,ispec)
Snp1 = factor_loc * epsilondev_xx_loc; //(i,j,k)
//R_xx(i,j,k,ispec,i_sls) = alphaval_loc * R_xx(i,j,k,ispec,i_sls) +
// betaval_loc * Sn + gammaval_loc * Snp1;
R_xx[offset_sls] = alphaval_loc * R_xx[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in yy
Sn = factor_loc * epsilondev_yy[ijk_ispec];
Snp1 = factor_loc * epsilondev_yy_loc;
R_yy[offset_sls] = alphaval_loc * R_yy[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in zz not computed since zero trace
// term in xy
Sn = factor_loc * epsilondev_xy[ijk_ispec];
Snp1 = factor_loc * epsilondev_xy_loc;
R_xy[offset_sls] = alphaval_loc * R_xy[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in xz
Sn = factor_loc * epsilondev_xz[ijk_ispec];
Snp1 = factor_loc * epsilondev_xz_loc;
R_xz[offset_sls] = alphaval_loc * R_xz[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
// term in yz
Sn = factor_loc * epsilondev_yz[ijk_ispec];
Snp1 = factor_loc * epsilondev_yz_loc;
R_yz[offset_sls] = alphaval_loc * R_yz[offset_sls] +
betaval_loc * Sn + gammaval_loc * Snp1;
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// pre-computes gravity term
__device__ void compute_element_gravity(int tx,int working_element,
int* d_ibool,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube,
realw jacobianl,
realw* s_dummyx_loc,
realw* s_dummyy_loc,
realw* s_dummyz_loc,
realw* sigma_xx,
realw* sigma_yy,
realw* sigma_xz,
realw* sigma_yz,
realw* rho_s_H1,
realw* rho_s_H2,
realw* rho_s_H3){
int iglob;
realw minus_g,minus_dg;
realw rhol;
realw gzl; // gxl,gyl,
realw sx_l,sy_l,sz_l;
realw Hxxl,Hyyl,Hzzl; //,Hxyl,Hxzl,Hyzl;
realw factor;
// compute non-symmetric terms for gravity
// get g, rho and dg/dr=dg
iglob = d_ibool[working_element*NGLL3 + tx]-1;
minus_g = d_minus_g[iglob];
minus_dg = d_minus_deriv_gravity[iglob];
// Cartesian components of the gravitational acceleration
//gxl = 0.f;
//gyl = 0.f;
gzl = minus_g;
// Cartesian components of gradient of gravitational acceleration
// H = grad g
// assumes g only acts in negative z-direction
Hxxl = 0.f;
Hyyl = 0.f;
Hzzl = minus_dg;
//Hxyl = 0.f;
//Hxzl = 0.f;
//Hyzl = 0.f;
rhol = d_rhostore[working_element*NGLL3_PADDED + tx];
// get displacement and multiply by density to compute G tensor
// G = rho [ sg - (s * g) I ]
sx_l = rhol * s_dummyx_loc[tx]; // d_displ[iglob*3];
sy_l = rhol * s_dummyy_loc[tx]; // d_displ[iglob*3 + 1];
sz_l = rhol * s_dummyz_loc[tx]; // d_displ[iglob*3 + 2];
// compute G tensor from s . g and add to sigma (not symmetric)
//sigma_xx += sy_l*gyl + sz_l*gzl;
*sigma_xx += sz_l*gzl;
//sigma_yy += sx_l*gxl + sz_l*gzl;
*sigma_yy += sz_l*gzl;
//sigma_zz += sx_l*gxl + sy_l*gyl;
//sigma_xy -= sx_l*gyl;
//sigma_yx -= sy_l*gxl;
*sigma_xz -= sx_l*gzl;
//sigma_zx -= sz_l*gxl;
*sigma_yz -= sy_l*gzl;
//sigma_zy -= sz_l*gyl;
// precompute vector
factor = jacobianl * wgll_cube[tx];
//rho_s_H1 = fac1 * (sx_l * Hxxl + sy_l * Hxyl + sz_l * Hxzl);
//rho_s_H2 = fac1 * (sx_l * Hxyl + sy_l * Hyyl + sz_l * Hyzl);
//rho_s_H3 = fac1 * (sx_l * Hxzl + sy_l * Hyzl + sz_l * Hzzl);
// only non-zero z-direction
*rho_s_H1 = factor * sx_l * Hxxl ; // 0.f;
*rho_s_H2 = factor * sy_l * Hyyl ; // 0.f;
*rho_s_H3 = factor * sz_l * Hzzl ;
// debug
//*rho_s_H1 = 0.f;
//*rho_s_H2 = 0.f;
//*rho_s_H3 = 0.f ;
}
/* ----------------------------------------------------------------------------------------------- */
// KERNEL 2
//
// for elastic domains
/* ----------------------------------------------------------------------------------------------- */
/*
// unused
// original elastic kernel, please leave this code here for reference...
__global__ void Kernel_2_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw d_deltat,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
int COMPUTE_AND_STORE_STRAIN,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int ATTENUATION,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube){
int bx = blockIdx.y*gridDim.x + blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
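// tx in [0,NGLL3) is decomposed into the local GLL indices (I,J,K), with I varying fastest:
// tx = K*NGLL2 + J*NGLLX + I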
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw tempx1l_att,tempx2l_att,tempx3l_att,tempy1l_att,tempy2l_att,tempy3l_att,tempz1l_att,tempz2l_att,tempz3l_att;
realw duxdxl_att,duxdyl_att,duxdzl_att,duydxl_att,duydyl_att,duydzl_att,duzdxl_att,duzdyl_att,duzdzl_att;
realw duxdyl_plus_duydxl_att,duzdxl_plus_duxdzl_att,duzdyl_plus_duydzl_att;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_dummyx_loc_att[NGLL3];
__shared__ realw s_dummyy_loc_att[NGLL3];
__shared__ realw s_dummyz_loc_att[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because we used memory padding from NGLL^3 = 125 to 128 to get coalesced memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3);
s_dummyy_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3 + 1);
s_dummyz_loc[tx] = tex1Dfetch(d_displ_tex, iglob*3 + 2);
#else
// changing iglob indexing to match fortran row changes fast style
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
}
// JC JC here we will need to add GPU support for the new C-PML routines
if(ATTENUATION){
// use first order Taylor expansion of displacement for local storage of stresses
// at this current time step, to fix attenuation in a consistent way
#ifdef USE_TEXTURES_FIELDS
// velocity is stored interleaved as (x,y,z) per node, matching the d_displ layout used above
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob*3);
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob*3 + 1);
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * tex1Dfetch(d_veloc_tex, iglob*3 + 2);
#else
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * d_veloc[iglob*3];
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * d_veloc[iglob*3 + 1];
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * d_veloc[iglob*3 + 2];
#endif
}
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = 0.f;
tempx2l_att = 0.f;
tempx3l_att = 0.f;
tempy1l_att = 0.f;
tempy2l_att = 0.f;
tempy3l_att = 0.f;
tempz1l_att = 0.f;
tempz2l_att = 0.f;
tempz3l_att = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l_att += s_dummyx_loc_att[offset]*hp1;
tempy1l_att += s_dummyy_loc_att[offset]*hp1;
tempz1l_att += s_dummyz_loc_att[offset]*hp1;
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l_att += s_dummyx_loc_att[offset]*hp2;
tempy2l_att += s_dummyy_loc_att[offset]*hp2;
tempz2l_att += s_dummyz_loc_att[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l_att += s_dummyx_loc_att[offset]*hp3;
tempy3l_att += s_dummyy_loc_att[offset]*hp3;
tempz3l_att += s_dummyz_loc_att[offset]*hp3;
}
}
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = s_dummyx_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l_att = s_dummyy_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l_att = s_dummyz_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l_att = s_dummyx_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l_att = s_dummyy_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l_att = s_dummyz_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l_att = s_dummyx_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l_att = s_dummyy_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l_att = s_dummyz_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
}
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save CPU time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
if( ATTENUATION){
// temporary variables used for fixing attenuation in a consistent way
duxdxl_att = xixl*tempx1l_att + etaxl*tempx2l_att + gammaxl*tempx3l_att;
duxdyl_att = xiyl*tempx1l_att + etayl*tempx2l_att + gammayl*tempx3l_att;
duxdzl_att = xizl*tempx1l_att + etazl*tempx2l_att + gammazl*tempx3l_att;
duydxl_att = xixl*tempy1l_att + etaxl*tempy2l_att + gammaxl*tempy3l_att;
duydyl_att = xiyl*tempy1l_att + etayl*tempy2l_att + gammayl*tempy3l_att;
duydzl_att = xizl*tempy1l_att + etazl*tempy2l_att + gammazl*tempy3l_att;
duzdxl_att = xixl*tempz1l_att + etaxl*tempz2l_att + gammaxl*tempz3l_att;
duzdyl_att = xiyl*tempz1l_att + etayl*tempz2l_att + gammayl*tempz3l_att;
duzdzl_att = xizl*tempz1l_att + etazl*tempz2l_att + gammazl*tempz3l_att;
// precompute some sums to save CPU time
duxdyl_plus_duydxl_att = duxdyl_att + duydxl_att;
duzdxl_plus_duxdzl_att = duzdxl_att + duxdzl_att;
duzdyl_plus_duydzl_att = duzdyl_att + duydzl_att;
// computes deviatoric strain attenuation and/or for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl_att + duydyl_att + duzdzl_att); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl_att - templ;
epsilondev_yy_loc = duydyl_att - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl_att;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl_att;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl_att;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
// JC JC here we will need to add GPU support for the new C-PML routines
}
}else{
// computes deviatoric strain attenuation and/or for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl + duydyl + duzdzl); // 1./3. = 0.33333
// epsilondev_xx[offset] = duxdxl - templ;
// epsilondev_yy[offset] = duydyl - templ;
// epsilondev_xy[offset] = 0.5f * duxdyl_plus_duydxl;
// epsilondev_xz[offset] = 0.5f * duzdxl_plus_duxdzl;
// epsilondev_yz[offset] = 0.5f * duzdyl_plus_duydzl;
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl - templ;
epsilondev_yy_loc = duydyl - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
}
}
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// attenuation
if(ATTENUATION){
// use unrelaxed parameters if attenuation
mul = mul * one_minus_sum_beta[tx+working_element*NGLL3]; // (i,j,k,ispec)
}
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
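// follows from kappa = lambda + 2/3 mu, hence lambda + 2 mu = kappa + 4/3 mu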
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
if(ATTENUATION){
// subtracts memory variables if attenuation
compute_element_att_stress(tx,working_element,NSPEC,
R_xx,R_yy,R_xy,R_xz,R_yz,
&sigma_xx,&sigma_yy,&sigma_zz,&sigma_xy,&sigma_xz,&sigma_yz);
}
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = tex1Dfetch(d_accel_tex, iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = tex1Dfetch(d_accel_tex, iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = tex1Dfetch(d_accel_tex, iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = tex1Dfetch(d_accel_tex, iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = tex1Dfetch(d_accel_tex, iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = tex1Dfetch(d_accel_tex, iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}
else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// update memory variables based upon the Runge-Kutta scheme
if( ATTENUATION ){
compute_element_att_memory(tx,working_element,NSPEC,
d_muv,
factor_common,alphaval,betaval,gammaval,
R_xx,R_yy,R_xy,R_xz,R_yz,
epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz,
epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc);
}
// save deviatoric strain for Runge-Kutta scheme
if( COMPUTE_AND_STORE_STRAIN ){
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
}
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_impl()
*/
/* ----------------------------------------------------------------------------------------------- */
// note: kernel_2 is split into two kernels:
// - a kernel without attenuation Kernel_2_noatt_impl() and
// - a kernel including attenuation Kernel_2_att_impl()
// this separation should help with performance
// kernel without attenuation
//
// we use templates to distinguish between calls with forward or adjoint texture fields
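//
// illustrative sketch only: a minimal example of how such a compile-time switch can be written;
// the helper name sketch_select_field and its pointer arguments are hypothetical, the actual
// kernels rely on the texfetch_displ<>/texfetch_accel<> helpers defined elsewhere in this file
template<int FORWARD_OR_ADJOINT>
__device__ __forceinline__ realw sketch_select_field(const realw* d_field_forward,
                                                     const realw* d_field_adjoint,
                                                     int index) {
  // the template parameter is a compile-time constant, so the compiler resolves this branch
  return (FORWARD_OR_ADJOINT == 1) ? d_field_forward[index] : d_field_adjoint[index];
}
//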
template<int FORWARD_OR_ADJOINT> __global__ void Kernel_2_noatt_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
int COMPUTE_AND_STORE_STRAIN,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube ){
// elastic compute kernel without attenuation
// holds for: ATTENUATION = .false.
// COMPUTE_AND_STORE_STRAIN = .true. or .false. (true for kernel simulations)
int bx = blockIdx.y*gridDim.x+blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
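// index decomposition example (illustrative, with NGLLX = 5 and NGLL2 = 25 as used for the
// 125-point elements below): tx = 87 gives K = 87/25 = 3, J = (87-75)/5 = 2, I = 87-75-10 = 2,
// i.e. this thread handles the (I,J,K) = (2,2,3) GLL point of its element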
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because we used memory padding from NGLL^3 = 125 to 128 to get coalesced memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
// debug
//if( iglob < 0 || iglob >= NGLOB ){ printf("wrong iglob %d\n",iglob); }
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
// iglob indexing follows the Fortran ordering, in which the first (row) index varies fastest
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
}
// JC JC here we will need to add GPU support for the new C-PML routines
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save computation time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
// computes deviatoric strain for kernel calculations
if(COMPUTE_AND_STORE_STRAIN) {
realw templ = 0.33333333333333333333f * (duxdxl + duydyl + duzdzl); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl - templ;
epsilondev_yy_loc = duydyl - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
}
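// in other words, the block above stores the deviatoric strain eps_dev = eps - (tr(eps)/3)*Id
// component by component, while tr(eps)/3 itself is kept in epsilon_trace_over_3 for kernel runs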
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
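// the isotropic branch above is Hooke's law sigma = lambda*tr(eps)*Id + 2*mu*eps,
// written with lambda + 2*mu = kappa + (4/3)*mu and eps_xy = 0.5*(duxdy + duydx), etc.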
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
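// the expression in parentheses is the determinant of the matrix of mapping derivatives
// [[xix,xiy,xiz],[etax,etay,etaz],[gammax,gammay,gammaz]] (cofactor expansion along the
// first row), so jacobianl is its reciprocal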
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
// w/out atomic update
//d_accel[iglob*3] += sum_terms1;
//d_accel[iglob*3 + 1] += sum_terms2;
//d_accel[iglob*3 + 2] += sum_terms3;
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// save deviatoric strain for Runge-Kutta scheme
if( COMPUTE_AND_STORE_STRAIN ){
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
}
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_noatt_impl()
/* ----------------------------------------------------------------------------------------------- */
// kernel with attenuation
//
// we use templates to distinguish between calls with forward or adjoint texture fields
template<int FORWARD_OR_ADJOINT> __global__ void Kernel_2_att_impl(int nb_blocks_to_compute,
int NGLOB,
int* d_ibool,
int* d_phase_ispec_inner_elastic, int num_phase_ispec_elastic,
int d_iphase,
int use_mesh_coloring_gpu,
realw d_deltat,
realw* d_displ,realw* d_veloc,realw* d_accel,
realw* d_xix, realw* d_xiy, realw* d_xiz,
realw* d_etax, realw* d_etay, realw* d_etaz,
realw* d_gammax, realw* d_gammay, realw* d_gammaz,
realw* d_hprime_xx,
realw* d_hprimewgll_xx,
realw* d_wgllwgll_xy,realw* d_wgllwgll_xz,realw* d_wgllwgll_yz,
realw* d_kappav, realw* d_muv,
realw* epsilondev_xx,realw* epsilondev_yy,realw* epsilondev_xy,
realw* epsilondev_xz,realw* epsilondev_yz,
realw* epsilon_trace_over_3,
int SIMULATION_TYPE,
int NSPEC,
realw* one_minus_sum_beta,realw* factor_common,
realw* R_xx, realw* R_yy, realw* R_xy, realw* R_xz, realw* R_yz,
realw* alphaval,realw* betaval,realw* gammaval,
int ANISOTROPY,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
int gravity,
realw* d_minus_g,
realw* d_minus_deriv_gravity,
realw* d_rhostore,
realw* wgll_cube){
// elastic compute kernel with attenuation
// holds for: ATTENUATION = .true.
// COMPUTE_AND_STORE_STRAIN = .true. (always true for attenuation)
int bx = blockIdx.y*gridDim.x+blockIdx.x;
int tx = threadIdx.x;
const int NGLL3_ALIGN = NGLL3_PADDED;
int K = (tx/NGLL2);
int J = ((tx-K*NGLL2)/NGLLX);
int I = (tx-K*NGLL2-J*NGLLX);
int active,offset;
int iglob = 0;
int working_element;
realw tempx1l,tempx2l,tempx3l,tempy1l,tempy2l,tempy3l,tempz1l,tempz2l,tempz3l;
realw xixl,xiyl,xizl,etaxl,etayl,etazl,gammaxl,gammayl,gammazl,jacobianl;
realw duxdxl,duxdyl,duxdzl,duydxl,duydyl,duydzl,duzdxl,duzdyl,duzdzl;
realw duxdxl_plus_duydyl,duxdxl_plus_duzdzl,duydyl_plus_duzdzl;
realw duxdyl_plus_duydxl,duzdxl_plus_duxdzl,duzdyl_plus_duydzl;
realw tempx1l_att,tempx2l_att,tempx3l_att,tempy1l_att,tempy2l_att,tempy3l_att,tempz1l_att,tempz2l_att,tempz3l_att;
realw duxdxl_att,duxdyl_att,duxdzl_att,duydxl_att,duydyl_att,duydzl_att,duzdxl_att,duzdyl_att,duzdzl_att;
realw duxdyl_plus_duydxl_att,duzdxl_plus_duxdzl_att,duzdyl_plus_duydzl_att;
realw fac1,fac2,fac3,lambdal,mul,lambdalplus2mul,kappal;
realw sigma_xx,sigma_yy,sigma_zz,sigma_xy,sigma_xz,sigma_yz;
realw epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc;
realw c11,c12,c13,c14,c15,c16,c22,c23,c24,c25,c26,c33,c34,c35,c36,c44,c45,c46,c55,c56,c66;
realw sum_terms1,sum_terms2,sum_terms3;
// gravity variables
realw sigma_yx,sigma_zx,sigma_zy;
realw rho_s_H1,rho_s_H2,rho_s_H3;
#ifndef MANUALLY_UNROLLED_LOOPS
int l;
realw hp1,hp2,hp3;
#endif
__shared__ realw s_dummyx_loc[NGLL3];
__shared__ realw s_dummyy_loc[NGLL3];
__shared__ realw s_dummyz_loc[NGLL3];
__shared__ realw s_dummyx_loc_att[NGLL3];
__shared__ realw s_dummyy_loc_att[NGLL3];
__shared__ realw s_dummyz_loc_att[NGLL3];
__shared__ realw s_tempx1[NGLL3];
__shared__ realw s_tempx2[NGLL3];
__shared__ realw s_tempx3[NGLL3];
__shared__ realw s_tempy1[NGLL3];
__shared__ realw s_tempy2[NGLL3];
__shared__ realw s_tempy3[NGLL3];
__shared__ realw s_tempz1[NGLL3];
__shared__ realw s_tempz2[NGLL3];
__shared__ realw s_tempz3[NGLL3];
__shared__ realw sh_hprime_xx[NGLL2];
// use only NGLL^3 = 125 active threads, plus 3 inactive/ghost threads,
// because we used memory padding from NGLL^3 = 125 to 128 to get coalesced memory accesses
active = (tx < NGLL3 && bx < nb_blocks_to_compute) ? 1:0;
// copy from global memory to shared memory
// each thread writes one of the NGLL^3 = 125 data points
if (active) {
#ifdef USE_MESH_COLORING_GPU
working_element = bx;
#else
//mesh coloring
if( use_mesh_coloring_gpu ){
working_element = bx;
}else{
// iphase-1 and working_element-1 for Fortran->C array conventions
working_element = d_phase_ispec_inner_elastic[bx + num_phase_ispec_elastic*(d_iphase-1)]-1;
}
#endif
iglob = d_ibool[working_element*NGLL3 + tx]-1;
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc[tx] = texfetch_displ<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
// iglob indexing follows the Fortran ordering, in which the first (row) index varies fastest
s_dummyx_loc[tx] = d_displ[iglob*3];
s_dummyy_loc[tx] = d_displ[iglob*3 + 1];
s_dummyz_loc[tx] = d_displ[iglob*3 + 2];
#endif
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// use first order Taylor expansion of displacement for local storage of stresses
// at this current time step, to fix attenuation in a consistent way
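// i.e. displ_att = displ + d_deltat * veloc, a first-order Taylor estimate of the
// displacement a time d_deltat ahead of the current one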
#ifdef USE_TEXTURES_FIELDS
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3);
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3 + 1);
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * texfetch_veloc<FORWARD_OR_ADJOINT>(iglob*3 + 2);
#else
s_dummyx_loc_att[tx] = s_dummyx_loc[tx] + d_deltat * d_veloc[iglob*3];
s_dummyy_loc_att[tx] = s_dummyy_loc[tx] + d_deltat * d_veloc[iglob*3 + 1];
s_dummyz_loc_att[tx] = s_dummyz_loc[tx] + d_deltat * d_veloc[iglob*3 + 2];
#endif
}
if (tx < NGLL2) {
#ifdef USE_TEXTURES_CONSTANTS
sh_hprime_xx[tx] = tex1Dfetch(d_hprime_xx_tex,tx);
#else
sh_hprime_xx[tx] = d_hprime_xx[tx];
#endif
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempx2l = 0.f;
tempx3l = 0.f;
tempy1l = 0.f;
tempy2l = 0.f;
tempy3l = 0.f;
tempz1l = 0.f;
tempz2l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_dummyx_loc[offset]*hp1;
tempy1l += s_dummyy_loc[offset]*hp1;
tempz1l += s_dummyz_loc[offset]*hp1;
//assumes that hprime_xx = hprime_yy = hprime_zz
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_dummyx_loc[offset]*hp2;
tempy2l += s_dummyy_loc[offset]*hp2;
tempz2l += s_dummyz_loc[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_dummyx_loc[offset]*hp3;
tempy3l += s_dummyy_loc[offset]*hp3;
tempz3l += s_dummyz_loc[offset]*hp3;
}
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = 0.f;
tempx2l_att = 0.f;
tempx3l_att = 0.f;
tempy1l_att = 0.f;
tempy2l_att = 0.f;
tempy3l_att = 0.f;
tempz1l_att = 0.f;
tempz2l_att = 0.f;
tempz3l_att = 0.f;
for (l=0;l<NGLLX;l++) {
hp1 = sh_hprime_xx[l*NGLLX+I];
offset = K*NGLL2+J*NGLLX+l;
tempx1l_att += s_dummyx_loc_att[offset]*hp1;
tempy1l_att += s_dummyy_loc_att[offset]*hp1;
tempz1l_att += s_dummyz_loc_att[offset]*hp1;
hp2 = sh_hprime_xx[l*NGLLX+J];
offset = K*NGLL2+l*NGLLX+I;
tempx2l_att += s_dummyx_loc_att[offset]*hp2;
tempy2l_att += s_dummyy_loc_att[offset]*hp2;
tempz2l_att += s_dummyz_loc_att[offset]*hp2;
hp3 = sh_hprime_xx[l*NGLLX+K];
offset = l*NGLL2+J*NGLLX+I;
tempx3l_att += s_dummyx_loc_att[offset]*hp3;
tempy3l_att += s_dummyy_loc_att[offset]*hp3;
tempz3l_att += s_dummyz_loc_att[offset]*hp3;
}
#else
tempx1l = s_dummyx_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l = s_dummyy_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l = s_dummyz_loc[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l = s_dummyx_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l = s_dummyy_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l = s_dummyz_loc[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l = s_dummyx_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l = s_dummyy_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l = s_dummyz_loc[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
tempx1l_att = s_dummyx_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyx_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempy1l_att = s_dummyy_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyy_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempz1l_att = s_dummyz_loc_att[K*NGLL2+J*NGLLX]*d_hprime_xx[I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+1]*d_hprime_xx[NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+2]*d_hprime_xx[2*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+3]*d_hprime_xx[3*NGLLX+I]
+ s_dummyz_loc_att[K*NGLL2+J*NGLLX+4]*d_hprime_xx[4*NGLLX+I];
tempx2l_att = s_dummyx_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyx_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyx_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempy2l_att = s_dummyy_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyy_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyy_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempz2l_att = s_dummyz_loc_att[K*NGLL2+I]*d_hprime_xx[J]
+ s_dummyz_loc_att[K*NGLL2+NGLLX+I]*d_hprime_xx[NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+2*NGLLX+I]*d_hprime_xx[2*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+3*NGLLX+I]*d_hprime_xx[3*NGLLX+J]
+ s_dummyz_loc_att[K*NGLL2+4*NGLLX+I]*d_hprime_xx[4*NGLLX+J];
tempx3l_att = s_dummyx_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyx_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyx_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyx_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyx_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempy3l_att = s_dummyy_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyy_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyy_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyy_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyy_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
tempz3l_att = s_dummyz_loc_att[J*NGLLX+I]*d_hprime_xx[K]
+ s_dummyz_loc_att[NGLL2+J*NGLLX+I]*d_hprime_xx[NGLLX+K]
+ s_dummyz_loc_att[2*NGLL2+J*NGLLX+I]*d_hprime_xx[2*NGLLX+K]
+ s_dummyz_loc_att[3*NGLL2+J*NGLLX+I]*d_hprime_xx[3*NGLLX+K]
+ s_dummyz_loc_att[4*NGLL2+J*NGLLX+I]*d_hprime_xx[4*NGLLX+K];
#endif
// compute derivatives of ux, uy and uz with respect to x, y and z
offset = working_element*NGLL3_ALIGN + tx;
xixl = d_xix[offset];
xiyl = d_xiy[offset];
xizl = d_xiz[offset];
etaxl = d_etax[offset];
etayl = d_etay[offset];
etazl = d_etaz[offset];
gammaxl = d_gammax[offset];
gammayl = d_gammay[offset];
gammazl = d_gammaz[offset];
duxdxl = xixl*tempx1l + etaxl*tempx2l + gammaxl*tempx3l;
duxdyl = xiyl*tempx1l + etayl*tempx2l + gammayl*tempx3l;
duxdzl = xizl*tempx1l + etazl*tempx2l + gammazl*tempx3l;
duydxl = xixl*tempy1l + etaxl*tempy2l + gammaxl*tempy3l;
duydyl = xiyl*tempy1l + etayl*tempy2l + gammayl*tempy3l;
duydzl = xizl*tempy1l + etazl*tempy2l + gammazl*tempy3l;
duzdxl = xixl*tempz1l + etaxl*tempz2l + gammaxl*tempz3l;
duzdyl = xiyl*tempz1l + etayl*tempz2l + gammayl*tempz3l;
duzdzl = xizl*tempz1l + etazl*tempz2l + gammazl*tempz3l;
// JC JC here we will need to add GPU support for the new C-PML routines
// precompute some sums to save computation time
duxdxl_plus_duydyl = duxdxl + duydyl;
duxdxl_plus_duzdzl = duxdxl + duzdzl;
duydyl_plus_duzdzl = duydyl + duzdzl;
duxdyl_plus_duydxl = duxdyl + duydxl;
duzdxl_plus_duxdzl = duzdxl + duxdzl;
duzdyl_plus_duydzl = duzdyl + duydzl;
// JC JC here we will need to add GPU support for the new C-PML routines
// attenuation
// temporary variables used for fixing attenuation in a consistent way
duxdxl_att = xixl*tempx1l_att + etaxl*tempx2l_att + gammaxl*tempx3l_att;
duxdyl_att = xiyl*tempx1l_att + etayl*tempx2l_att + gammayl*tempx3l_att;
duxdzl_att = xizl*tempx1l_att + etazl*tempx2l_att + gammazl*tempx3l_att;
duydxl_att = xixl*tempy1l_att + etaxl*tempy2l_att + gammaxl*tempy3l_att;
duydyl_att = xiyl*tempy1l_att + etayl*tempy2l_att + gammayl*tempy3l_att;
duydzl_att = xizl*tempy1l_att + etazl*tempy2l_att + gammazl*tempy3l_att;
duzdxl_att = xixl*tempz1l_att + etaxl*tempz2l_att + gammaxl*tempz3l_att;
duzdyl_att = xiyl*tempz1l_att + etayl*tempz2l_att + gammayl*tempz3l_att;
duzdzl_att = xizl*tempz1l_att + etazl*tempz2l_att + gammazl*tempz3l_att;
// precompute some sums to save computation time
duxdyl_plus_duydxl_att = duxdyl_att + duydxl_att;
duzdxl_plus_duxdzl_att = duzdxl_att + duxdzl_att;
duzdyl_plus_duydzl_att = duzdyl_att + duydzl_att;
// attenuation
// computes deviatoric strain, used for attenuation and/or kernel calculations
realw templ = 0.33333333333333333333f * (duxdxl_att + duydyl_att + duzdzl_att); // 1./3. = 0.33333
// local storage: stresses at this current time step
epsilondev_xx_loc = duxdxl_att - templ;
epsilondev_yy_loc = duydyl_att - templ;
epsilondev_xy_loc = 0.5f * duxdyl_plus_duydxl_att;
epsilondev_xz_loc = 0.5f * duzdxl_plus_duxdzl_att;
epsilondev_yz_loc = 0.5f * duzdyl_plus_duydzl_att;
if(SIMULATION_TYPE == 3) {
epsilon_trace_over_3[tx + working_element*NGLL3] = templ;
}
// compute elements with an elastic isotropic rheology
kappal = d_kappav[offset];
mul = d_muv[offset];
// attenuation
// use unrelaxed parameters if attenuation
mul = mul * one_minus_sum_beta[tx+working_element*NGLL3]; // (i,j,k,ispec)
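// i.e. the stored shear modulus is rescaled by the precomputed factor (1 - sum of beta over
// the standard linear solids) so that the stress below is built from the unrelaxed modulus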
// full anisotropic case, stress calculations
if(ANISOTROPY){
c11 = d_c11store[offset];
c12 = d_c12store[offset];
c13 = d_c13store[offset];
c14 = d_c14store[offset];
c15 = d_c15store[offset];
c16 = d_c16store[offset];
c22 = d_c22store[offset];
c23 = d_c23store[offset];
c24 = d_c24store[offset];
c25 = d_c25store[offset];
c26 = d_c26store[offset];
c33 = d_c33store[offset];
c34 = d_c34store[offset];
c35 = d_c35store[offset];
c36 = d_c36store[offset];
c44 = d_c44store[offset];
c45 = d_c45store[offset];
c46 = d_c46store[offset];
c55 = d_c55store[offset];
c56 = d_c56store[offset];
c66 = d_c66store[offset];
sigma_xx = c11*duxdxl + c16*duxdyl_plus_duydxl + c12*duydyl +
c15*duzdxl_plus_duxdzl + c14*duzdyl_plus_duydzl + c13*duzdzl;
sigma_yy = c12*duxdxl + c26*duxdyl_plus_duydxl + c22*duydyl +
c25*duzdxl_plus_duxdzl + c24*duzdyl_plus_duydzl + c23*duzdzl;
sigma_zz = c13*duxdxl + c36*duxdyl_plus_duydxl + c23*duydyl +
c35*duzdxl_plus_duxdzl + c34*duzdyl_plus_duydzl + c33*duzdzl;
sigma_xy = c16*duxdxl + c66*duxdyl_plus_duydxl + c26*duydyl +
c56*duzdxl_plus_duxdzl + c46*duzdyl_plus_duydzl + c36*duzdzl;
sigma_xz = c15*duxdxl + c56*duxdyl_plus_duydxl + c25*duydyl +
c55*duzdxl_plus_duxdzl + c45*duzdyl_plus_duydzl + c35*duzdzl;
sigma_yz = c14*duxdxl + c46*duxdyl_plus_duydxl + c24*duydyl +
c45*duzdxl_plus_duxdzl + c44*duzdyl_plus_duydzl + c34*duzdzl;
}else{
// isotropic case
lambdalplus2mul = kappal + 1.33333333333333333333f * mul; // 4./3. = 1.3333333
lambdal = lambdalplus2mul - 2.0f * mul;
// compute the six components of the stress tensor sigma
sigma_xx = lambdalplus2mul*duxdxl + lambdal*duydyl_plus_duzdzl;
sigma_yy = lambdalplus2mul*duydyl + lambdal*duxdxl_plus_duzdzl;
sigma_zz = lambdalplus2mul*duzdzl + lambdal*duxdxl_plus_duydyl;
sigma_xy = mul*duxdyl_plus_duydxl;
sigma_xz = mul*duzdxl_plus_duxdzl;
sigma_yz = mul*duzdyl_plus_duydzl;
}
// attenuation
// subtracts memory variables if attenuation
compute_element_att_stress(tx,working_element,NSPEC,
R_xx,R_yy,R_xy,R_xz,R_yz,
&sigma_xx,&sigma_yy,&sigma_zz,&sigma_xy,&sigma_xz,&sigma_yz);
jacobianl = 1.0f / (xixl*(etayl*gammazl-etazl*gammayl)-xiyl*(etaxl*gammazl-etazl*gammaxl)+xizl*(etaxl*gammayl-etayl*gammaxl));
// define symmetric components (needed for non-symmetric dot product and sigma for gravity)
sigma_yx = sigma_xy;
sigma_zx = sigma_xz;
sigma_zy = sigma_yz;
if( gravity ){
// computes non-symmetric terms for gravity
compute_element_gravity(tx,working_element,d_ibool,d_minus_g,d_minus_deriv_gravity,
d_rhostore,wgll_cube,jacobianl,
s_dummyx_loc,s_dummyy_loc,s_dummyz_loc,
&sigma_xx,&sigma_yy,&sigma_xz,&sigma_yz,
&rho_s_H1,&rho_s_H2,&rho_s_H3);
}
// form dot product with test vector, non-symmetric form
s_tempx1[tx] = jacobianl * (sigma_xx*xixl + sigma_yx*xiyl + sigma_zx*xizl);
s_tempy1[tx] = jacobianl * (sigma_xy*xixl + sigma_yy*xiyl + sigma_zy*xizl);
s_tempz1[tx] = jacobianl * (sigma_xz*xixl + sigma_yz*xiyl + sigma_zz*xizl);
s_tempx2[tx] = jacobianl * (sigma_xx*etaxl + sigma_yx*etayl + sigma_zx*etazl);
s_tempy2[tx] = jacobianl * (sigma_xy*etaxl + sigma_yy*etayl + sigma_zy*etazl);
s_tempz2[tx] = jacobianl * (sigma_xz*etaxl + sigma_yz*etayl + sigma_zz*etazl);
s_tempx3[tx] = jacobianl * (sigma_xx*gammaxl + sigma_yx*gammayl + sigma_zx*gammazl);
s_tempy3[tx] = jacobianl * (sigma_xy*gammaxl + sigma_yy*gammayl + sigma_zy*gammazl);
s_tempz3[tx] = jacobianl * (sigma_xz*gammaxl + sigma_yz*gammayl + sigma_zz*gammazl);
}
// synchronize all the threads (one thread for each of the NGLL grid points of the
// current spectral element) because we need the whole element to be ready in order
// to be able to compute the matrix products along cut planes of the 3D element below
__syncthreads();
// JC JC here we will need to add GPU support for the new C-PML routines
if (active) {
#ifndef MANUALLY_UNROLLED_LOOPS
tempx1l = 0.f;
tempy1l = 0.f;
tempz1l = 0.f;
tempx2l = 0.f;
tempy2l = 0.f;
tempz2l = 0.f;
tempx3l = 0.f;
tempy3l = 0.f;
tempz3l = 0.f;
for (l=0;l<NGLLX;l++) {
fac1 = d_hprimewgll_xx[I*NGLLX+l];
offset = K*NGLL2+J*NGLLX+l;
tempx1l += s_tempx1[offset]*fac1;
tempy1l += s_tempy1[offset]*fac1;
tempz1l += s_tempz1[offset]*fac1;
// assumes hprimewgll_xx == hprimewgll_yy == hprimewgll_zz
fac2 = d_hprimewgll_xx[J*NGLLX+l];
offset = K*NGLL2+l*NGLLX+I;
tempx2l += s_tempx2[offset]*fac2;
tempy2l += s_tempy2[offset]*fac2;
tempz2l += s_tempz2[offset]*fac2;
fac3 = d_hprimewgll_xx[K*NGLLX+l];
offset = l*NGLL2+J*NGLLX+I;
tempx3l += s_tempx3[offset]*fac3;
tempy3l += s_tempy3[offset]*fac3;
tempz3l += s_tempz3[offset]*fac3;
}
#else
tempx1l = s_tempx1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempx1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempx1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempx1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempx1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempy1l = s_tempy1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempy1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempy1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempy1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempy1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempz1l = s_tempz1[K*NGLL2+J*NGLLX]*d_hprimewgll_xx[I*NGLLX]
+ s_tempz1[K*NGLL2+J*NGLLX+1]*d_hprimewgll_xx[I*NGLLX+1]
+ s_tempz1[K*NGLL2+J*NGLLX+2]*d_hprimewgll_xx[I*NGLLX+2]
+ s_tempz1[K*NGLL2+J*NGLLX+3]*d_hprimewgll_xx[I*NGLLX+3]
+ s_tempz1[K*NGLL2+J*NGLLX+4]*d_hprimewgll_xx[I*NGLLX+4];
tempx2l = s_tempx2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempx2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempx2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempx2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempx2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempy2l = s_tempy2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempy2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempy2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempy2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempy2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempz2l = s_tempz2[K*NGLL2+I]*d_hprimewgll_xx[J*NGLLX]
+ s_tempz2[K*NGLL2+NGLLX+I]*d_hprimewgll_xx[J*NGLLX+1]
+ s_tempz2[K*NGLL2+2*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+2]
+ s_tempz2[K*NGLL2+3*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+3]
+ s_tempz2[K*NGLL2+4*NGLLX+I]*d_hprimewgll_xx[J*NGLLX+4];
tempx3l = s_tempx3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempx3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempx3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempx3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempx3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempy3l = s_tempy3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempy3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempy3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempy3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempy3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
tempz3l = s_tempz3[J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX]
+ s_tempz3[NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+1]
+ s_tempz3[2*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+2]
+ s_tempz3[3*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+3]
+ s_tempz3[4*NGLL2+J*NGLLX+I]*d_hprimewgll_xx[K*NGLLX+4];
#endif
fac1 = d_wgllwgll_yz[K*NGLLX+J];
fac2 = d_wgllwgll_xz[K*NGLLX+I];
fac3 = d_wgllwgll_xy[J*NGLLX+I];
sum_terms1 = - (fac1*tempx1l + fac2*tempx2l + fac3*tempx3l);
sum_terms2 = - (fac1*tempy1l + fac2*tempy2l + fac3*tempy3l);
sum_terms3 = - (fac1*tempz1l + fac2*tempz2l + fac3*tempz3l);
// adds gravity term
if( gravity ){
sum_terms1 += rho_s_H1;
sum_terms2 += rho_s_H2;
sum_terms3 += rho_s_H3;
}
#ifdef USE_MESH_COLORING_GPU
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
// JC JC here we will need to add GPU support for the new C-PML routines
#else // MESH_COLORING
//mesh coloring
if( use_mesh_coloring_gpu ){
// no atomic operation needed, colors don't share global points between elements
#ifdef USE_TEXTURES_FIELDS
d_accel[iglob*3] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3) + sum_terms1;
d_accel[iglob*3 + 1] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 1) + sum_terms2;
d_accel[iglob*3 + 2] = texfetch_accel<FORWARD_OR_ADJOINT>(iglob*3 + 2) + sum_terms3;
#else
d_accel[iglob*3] += sum_terms1;
d_accel[iglob*3 + 1] += sum_terms2;
d_accel[iglob*3 + 2] += sum_terms3;
#endif // USE_TEXTURES_FIELDS
}
else {
// for testing purposes only: w/out atomic updates
//d_accel[iglob*3] -= (0.00000001f*tempx1l + 0.00000001f*tempx2l + 0.00000001f*tempx3l);
//d_accel[iglob*3 + 1] -= (0.00000001f*tempy1l + 0.00000001f*tempy2l + 0.00000001f*tempy3l);
//d_accel[iglob*3 + 2] -= (0.00000001f*tempz1l + 0.00000001f*tempz2l + 0.00000001f*tempz3l);
// w/out atomic update
//d_accel[iglob*3] += sum_terms1;
//d_accel[iglob*3 + 1] += sum_terms2;
//d_accel[iglob*3 + 2] += sum_terms3;
atomicAdd(&d_accel[iglob*3], sum_terms1);
atomicAdd(&d_accel[iglob*3+1], sum_terms2);
atomicAdd(&d_accel[iglob*3+2], sum_terms3);
} // if(use_mesh_coloring_gpu)
#endif // MESH_COLORING
// attenuation
// update memory variables based upon the Runge-Kutta scheme
compute_element_att_memory(tx,working_element,NSPEC,
d_muv,
factor_common,alphaval,betaval,gammaval,
R_xx,R_yy,R_xy,R_xz,R_yz,
epsilondev_xx,epsilondev_yy,epsilondev_xy,epsilondev_xz,epsilondev_yz,
epsilondev_xx_loc,epsilondev_yy_loc,epsilondev_xy_loc,epsilondev_xz_loc,epsilondev_yz_loc);
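// note: compute_element_att_memory (defined elsewhere) advances the memory variables
// R_xx,...,R_yz of each standard linear solid from the old and new deviatoric strains,
// using the precomputed alphaval/betaval/gammaval coefficients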
// save deviatoric strain for Runge-Kutta scheme
int ijk_ispec = tx + working_element*NGLL3;
// fortran: epsilondev_xx(:,:,:,ispec) = epsilondev_xx_loc(:,:,:)
epsilondev_xx[ijk_ispec] = epsilondev_xx_loc;
epsilondev_yy[ijk_ispec] = epsilondev_yy_loc;
epsilondev_xy[ijk_ispec] = epsilondev_xy_loc;
epsilondev_xz[ijk_ispec] = epsilondev_xz_loc;
epsilondev_yz[ijk_ispec] = epsilondev_yz_loc;
} // if(active)
// JC JC here we will need to add GPU support for the new C-PML routines
} // kernel_2_att_impl()
/* ----------------------------------------------------------------------------------------------- */
void Kernel_2(int nb_blocks_to_compute,Mesh* mp,int d_iphase,realw d_deltat,
int COMPUTE_AND_STORE_STRAIN,
int ATTENUATION,int ANISOTROPY,
int* d_ibool,
realw* d_xix,realw* d_xiy,realw* d_xiz,
realw* d_etax,realw* d_etay,realw* d_etaz,
realw* d_gammax,realw* d_gammay,realw* d_gammaz,
realw* d_kappav,
realw* d_muv,
realw* d_epsilondev_xx,realw* d_epsilondev_yy,realw* d_epsilondev_xy,
realw* d_epsilondev_xz,realw* d_epsilondev_yz,
realw* d_epsilon_trace_over_3,
realw* d_one_minus_sum_beta,
realw* d_factor_common,
realw* d_R_xx,realw* d_R_yy,realw* d_R_xy,
realw* d_R_xz,realw* d_R_yz,
realw* d_b_epsilondev_xx,realw* d_b_epsilondev_yy,realw* d_b_epsilondev_xy,
realw* d_b_epsilondev_xz,realw* d_b_epsilondev_yz,
realw* d_b_epsilon_trace_over_3,
realw* d_b_R_xx,realw* d_b_R_yy,realw* d_b_R_xy,
realw* d_b_R_xz,realw* d_b_R_yz,
realw* d_c11store,realw* d_c12store,realw* d_c13store,
realw* d_c14store,realw* d_c15store,realw* d_c16store,
realw* d_c22store,realw* d_c23store,realw* d_c24store,
realw* d_c25store,realw* d_c26store,realw* d_c33store,
realw* d_c34store,realw* d_c35store,realw* d_c36store,
realw* d_c44store,realw* d_c45store,realw* d_c46store,
realw* d_c55store,realw* d_c56store,realw* d_c66store,
realw* d_rhostore){
TRACE("\tKernel_2");
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("before kernel Kernel 2");
#endif
/* if the grid can handle the number of blocks, we let it be 1D */
/* grid_2_x = nb_elem_color; */
/* nb_elem_color is just how many blocks we are computing now */
int blocksize = NGLL3_PADDED;
int num_blocks_x, num_blocks_y;
get_blocks_xy(nb_blocks_to_compute,&num_blocks_x,&num_blocks_y);
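// get_blocks_xy (defined elsewhere) spreads nb_blocks_to_compute over a 1D or, if needed, a 2D
// grid, since gridDim.x is capped on older hardware (commonly at 65535 blocks); the product
// num_blocks_x * num_blocks_y is therefore at least nb_blocks_to_compute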
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// Cuda timing
// cudaEvent_t start, stop;
// realw time;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord( start, 0 );
if( ATTENUATION ){
// debug
//printf("Running Kernel_2 with attenuation\n");
// compute kernels with attenuation
// forward wavefields -> FORWARD_OR_ADJOINT == 1
Kernel_2_att_impl<1><<<grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,
mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
d_deltat,
mp->d_displ,mp->d_veloc,mp->d_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
d_epsilondev_xx,d_epsilondev_yy,d_epsilondev_xy,
d_epsilondev_xz,d_epsilondev_yz,
d_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,
d_factor_common,
d_R_xx,d_R_yy,d_R_xy,d_R_xz,d_R_yz,
mp->d_alphaval,mp->d_betaval,mp->d_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube);
if(mp->simulation_type == 3) {
// backward/reconstructed wavefields -> FORWARD_OR_ADJOINT == 3
Kernel_2_att_impl<3><<< grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,
mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
d_deltat,
mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
d_b_epsilondev_xx,d_b_epsilondev_yy,d_b_epsilondev_xy,
d_b_epsilondev_xz,d_b_epsilondev_yz,
d_b_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,
d_factor_common,
d_b_R_xx,d_b_R_yy,d_b_R_xy,d_b_R_xz,d_b_R_yz,
mp->d_b_alphaval,mp->d_b_betaval,mp->d_b_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube);
}
}else{
// debug
//printf("Running Kernel_2 without attenuation\n");
// compute kernels without attenuation
// forward wavefields -> FORWARD_OR_ADJOINT == 1
Kernel_2_noatt_impl<1><<<grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
mp->d_displ,mp->d_veloc,mp->d_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
COMPUTE_AND_STORE_STRAIN,
d_epsilondev_xx,d_epsilondev_yy,d_epsilondev_xy,
d_epsilondev_xz,d_epsilondev_yz,
d_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,d_factor_common,
d_R_xx,d_R_yy,d_R_xy,d_R_xz,d_R_yz,
mp->d_alphaval,mp->d_betaval,mp->d_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube );
// backward/reconstructed wavefield
if(mp->simulation_type == 3) {
// backward/reconstructed wavefields -> FORWARD_OR_ADJOINT == 3
Kernel_2_noatt_impl<3><<< grid,threads,0,mp->compute_stream>>>(nb_blocks_to_compute,
mp->NGLOB_AB,
d_ibool,
mp->d_phase_ispec_inner_elastic,mp->num_phase_ispec_elastic,
d_iphase,
mp->use_mesh_coloring_gpu,
mp->d_b_displ,mp->d_b_veloc,mp->d_b_accel,
d_xix, d_xiy, d_xiz,
d_etax, d_etay, d_etaz,
d_gammax, d_gammay, d_gammaz,
mp->d_hprime_xx,
mp->d_hprimewgll_xx,
mp->d_wgllwgll_xy, mp->d_wgllwgll_xz, mp->d_wgllwgll_yz,
d_kappav, d_muv,
COMPUTE_AND_STORE_STRAIN,
d_b_epsilondev_xx,d_b_epsilondev_yy,d_b_epsilondev_xy,
d_b_epsilondev_xz,d_b_epsilondev_yz,
d_b_epsilon_trace_over_3,
mp->simulation_type,
mp->NSPEC_AB,
d_one_minus_sum_beta,d_factor_common,
d_b_R_xx,d_b_R_yy,d_b_R_xy,d_b_R_xz,d_b_R_yz,
mp->d_b_alphaval,mp->d_b_betaval,mp->d_b_gammaval,
ANISOTROPY,
d_c11store,d_c12store,d_c13store,
d_c14store,d_c15store,d_c16store,
d_c22store,d_c23store,d_c24store,
d_c25store,d_c26store,d_c33store,
d_c34store,d_c35store,d_c36store,
d_c44store,d_c45store,d_c46store,
d_c55store,d_c56store,d_c66store,
mp->gravity,
mp->d_minus_g,
mp->d_minus_deriv_gravity,
d_rhostore,
mp->d_wgll_cube );
}
}
// cudaEventRecord( stop, 0 );
// cudaEventSynchronize( stop );
// cudaEventElapsedTime( &time, start, stop );
// cudaEventDestroy( start );
// cudaEventDestroy( stop );
// printf("Kernel2 Execution Time: %f ms\n",time);
// cudaThreadSynchronize(); //
// LOG("Kernel 2 finished"); //
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("Kernel_2_impl");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(compute_forces_viscoelastic_cuda,
COMPUTE_FORCES_VISCOELASTIC_CUDA)(long* Mesh_pointer,
int* iphase,
realw* deltat,
int* nspec_outer_elastic,
int* nspec_inner_elastic,
int* COMPUTE_AND_STORE_STRAIN,
int* ATTENUATION,
int* ANISOTROPY) {
TRACE("\tcompute_forces_viscoelastic_cuda");
// EPIK_TRACER("compute_forces_viscoelastic_cuda");
//printf("Running compute_forces\n");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
int num_elements;
if( *iphase == 1 )
num_elements = *nspec_outer_elastic;
else
num_elements = *nspec_inner_elastic;
// checks if anything to do
if( num_elements == 0 ) return;
// mesh coloring
if( mp->use_mesh_coloring_gpu ){
// note: array offsets require sorted arrays, such that e.g. ibool starts with elastic elements
// followed by acoustic ones.
// within the elastic elements, outer elements are ordered before inner ones
int nb_colors,nb_blocks_to_compute;
int istart;
int offset,offset_nonpadded,offset_nonpadded_att2;
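// offset indexes the NGLL3_PADDED-aligned arrays (xix..gammaz, kappav, muv, c*store, rhostore),
// offset_nonpadded indexes the NGLL3 arrays (e.g. ibool, strains, R_* memory variables),
// offset_nonpadded_att2 indexes the per-SLS factor_common array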
// sets up color loop
if( *iphase == 1 ){
// outer elements
nb_colors = mp->num_colors_outer_elastic;
istart = 0;
// array offsets
offset = 0;
offset_nonpadded = 0;
offset_nonpadded_att2 = 0;
}else{
// inner elements (start after outer elements)
nb_colors = mp->num_colors_outer_elastic + mp->num_colors_inner_elastic;
istart = mp->num_colors_outer_elastic;
// array offsets
offset = (*nspec_outer_elastic) * NGLL3_PADDED;
offset_nonpadded = (*nspec_outer_elastic) * NGLL3;
offset_nonpadded_att2 = (*nspec_outer_elastic) * NGLL3 * N_SLS;
}
// loops over colors
for(int icolor = istart; icolor < nb_colors; icolor++){
nb_blocks_to_compute = mp->h_num_elem_colors_elastic[icolor];
// checks
//if( nb_blocks_to_compute <= 0 ){
// printf("error number of elastic color blocks: %d -- color = %d \n",nb_blocks_to_compute,icolor);
// exit(EXIT_FAILURE);
//}
Kernel_2(nb_blocks_to_compute,mp,*iphase,*deltat,
*COMPUTE_AND_STORE_STRAIN,
*ATTENUATION,*ANISOTROPY,
mp->d_ibool + offset_nonpadded,
mp->d_xix + offset,mp->d_xiy + offset,mp->d_xiz + offset,
mp->d_etax + offset,mp->d_etay + offset,mp->d_etaz + offset,
mp->d_gammax + offset,mp->d_gammay + offset,mp->d_gammaz + offset,
mp->d_kappav + offset,
mp->d_muv + offset,
mp->d_epsilondev_xx + offset_nonpadded,mp->d_epsilondev_yy + offset_nonpadded,mp->d_epsilondev_xy + offset_nonpadded,
mp->d_epsilondev_xz + offset_nonpadded,mp->d_epsilondev_yz + offset_nonpadded,
mp->d_epsilon_trace_over_3 + offset_nonpadded,
mp->d_one_minus_sum_beta + offset_nonpadded,
mp->d_factor_common + offset_nonpadded_att2,
mp->d_R_xx + offset_nonpadded,mp->d_R_yy + offset_nonpadded,mp->d_R_xy + offset_nonpadded,
mp->d_R_xz + offset_nonpadded,mp->d_R_yz + offset_nonpadded,
mp->d_b_epsilondev_xx + offset_nonpadded,mp->d_b_epsilondev_yy + offset_nonpadded,mp->d_b_epsilondev_xy + offset_nonpadded,
mp->d_b_epsilondev_xz + offset_nonpadded,mp->d_b_epsilondev_yz + offset_nonpadded,
mp->d_b_epsilon_trace_over_3 + offset_nonpadded,
mp->d_b_R_xx + offset_nonpadded,mp->d_b_R_yy + offset_nonpadded,mp->d_b_R_xy + offset_nonpadded,
mp->d_b_R_xz + offset_nonpadded,mp->d_b_R_yz + offset_nonpadded,
mp->d_c11store + offset,mp->d_c12store + offset,mp->d_c13store + offset,
mp->d_c14store + offset,mp->d_c15store + offset,mp->d_c16store + offset,
mp->d_c22store + offset,mp->d_c23store + offset,mp->d_c24store + offset,
mp->d_c25store + offset,mp->d_c26store + offset,mp->d_c33store + offset,
mp->d_c34store + offset,mp->d_c35store + offset,mp->d_c36store + offset,
mp->d_c44store + offset,mp->d_c45store + offset,mp->d_c46store + offset,
mp->d_c55store + offset,mp->d_c56store + offset,mp->d_c66store + offset,
mp->d_rhostore + offset);
// for padded and aligned arrays
offset += nb_blocks_to_compute * NGLL3_PADDED;
// for no-aligned arrays
offset_nonpadded += nb_blocks_to_compute * NGLL3;
// for factor_common array
offset_nonpadded_att2 += nb_blocks_to_compute * NGLL3 * N_SLS;
//note: we use the same stream, so kernels are executed one after the other
//      thus, when running on a single process there should be no need to synchronize to avoid race conditions
}
}else{
// no mesh coloring: uses atomic updates
Kernel_2(num_elements,mp,*iphase,*deltat,
*COMPUTE_AND_STORE_STRAIN,
*ATTENUATION,*ANISOTROPY,
mp->d_ibool,
mp->d_xix,mp->d_xiy,mp->d_xiz,
mp->d_etax,mp->d_etay,mp->d_etaz,
mp->d_gammax,mp->d_gammay,mp->d_gammaz,
mp->d_kappav,
mp->d_muv,
mp->d_epsilondev_xx,mp->d_epsilondev_yy,mp->d_epsilondev_xy,
mp->d_epsilondev_xz,mp->d_epsilondev_yz,
mp->d_epsilon_trace_over_3,
mp->d_one_minus_sum_beta,
mp->d_factor_common,
mp->d_R_xx,mp->d_R_yy,mp->d_R_xy,
mp->d_R_xz,mp->d_R_yz,
mp->d_b_epsilondev_xx,mp->d_b_epsilondev_yy,mp->d_b_epsilondev_xy,
mp->d_b_epsilondev_xz,mp->d_b_epsilondev_yz,
mp->d_b_epsilon_trace_over_3,
mp->d_b_R_xx,mp->d_b_R_yy,mp->d_b_R_xy,
mp->d_b_R_xz,mp->d_b_R_yz,
mp->d_c11store,mp->d_c12store,mp->d_c13store,
mp->d_c14store,mp->d_c15store,mp->d_c16store,
mp->d_c22store,mp->d_c23store,mp->d_c24store,
mp->d_c25store,mp->d_c26store,mp->d_c33store,
mp->d_c34store,mp->d_c35store,mp->d_c36store,
mp->d_c44store,mp->d_c45store,mp->d_c46store,
mp->d_c55store,mp->d_c56store,mp->d_c66store,
mp->d_rhostore);
}
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(sync_copy_from_device,
SYNC_copy_FROM_DEVICE)(long* Mesh_pointer,
int* iphase,
realw* send_buffer) {
TRACE("sync_copy_from_device");
Mesh* mp = (Mesh*)(*Mesh_pointer); // get Mesh from fortran integer wrapper
// Wait until async-memcpy of outer elements is finished and start MPI.
if( *iphase != 2 ){ exit_on_cuda_error("sync_copy_from_device must be called for iphase == 2"); }
if( mp->size_mpi_buffer > 0 ){
// waits for asynchronous copy to finish
cudaStreamSynchronize(mp->copy_stream);
// There have been problems using the pinned-memory with MPI, so
// we copy the buffer into a non-pinned region.
memcpy(send_buffer,mp->h_send_accel_buffer,mp->size_mpi_buffer*sizeof(float));
}
// memory copy is now finished, so non-blocking MPI send can proceed
}
|
7b64d7c5ae2b1ae79e4f71de7f0b83b44e437a6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers.h"
/**
* Host main routine
*/
int main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
printf("Doing operations on matrix of size %d x %d with mask of size %d x %d\n", width, width, mask_width, mask_width);
size_t mat_size = width*width*sizeof(float);
size_t mask_size = mask_width*mask_width*sizeof(float);
float *h_A = (float*)malloc(mat_size);
// Verify that allocations succeeded
if (h_A == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
h_A[i*width+j] = rand()/(float)RAND_MAX;
}
printf("MATRIX:\n");
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
printf("%f ",h_A[i*width+j]);
printf("\n");
}
float *h_B = (float*)malloc(mask_size);
// Verify that allocations succeeded
if (h_B == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < mask_width; ++i)
{
for(int j=0; j<mask_width; j++)
h_B[i*mask_width+j] = rand()/(float)RAND_MAX;
}
printf("MASK:\n");
for (int i = 0; i <mask_width; ++i)
{
for(int j=0; j<mask_width; j++)
printf("%f ",h_B[i*mask_width+j]);
printf("\n");
}
float *h_C = (float*)malloc(mat_size);
// Verify that allocations succeeded
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//*************************************************************************************************
// Allocate the device input vector A
// Every function with a "hip" prefix returns an error code which can be used to track errors
float *d_A = NULL;
err = hipMalloc((void **)&d_A, mat_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
// Every function with a "hip" prefix returns an error code which can be used to track errors
float *d_B = NULL;
err = hipMalloc((void **)&d_B, mask_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_C = NULL;
err = hipMalloc((void **)&d_C, mat_size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
///////////////////////////////////// Operation 1 //////////////////////////////////////////////
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
err = hipMemcpy(d_A, h_A, mat_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, mask_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_C, h_C, mat_size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Process Kernel 1
dim3 grid(2,2,1);
dim3 block(4,4,1);
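// note: the fixed 2x2 grid of 4x4 blocks covers an 8x8 output tile, which presumably
// matches the width/TILE_SIZE values defined in headers.h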
hipLaunchKernelGGL(( conv), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C, mask_width, width, TILE_SIZE);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Operation completed");
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = hipMemcpy(h_C, d_C, width*width*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\n Convolved Matrix\n");
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
printf("%f ",h_C[i*width+j]);
printf("\n");
}
printf("%f\n", h_A[0*width+1+3]*h_B[1*mask_width+0] + h_A[0*width+2+3]*h_B[1*mask_width+1] + h_A[1*width+3+3]*h_B[1*mask_width+2] + h_A[1*width+1+3]*h_B[2*mask_width+0]+ h_A[1*width+2+3]*h_B[2*mask_width+1]+ h_A[1*width+3+3]*h_B[2*mask_width+2]);
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 7b64d7c5ae2b1ae79e4f71de7f0b83b44e437a6f.cu | #include "headers.h"
/**
* Host main routine
*/
int main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
printf("Doing operations on matrix of size %d x %d with mask of size %d x %d\n", width, width, mask_width, mask_width);
size_t mat_size = width*width*sizeof(float);
size_t mask_size = mask_width*mask_width*sizeof(float);
float *h_A = (float*)malloc(mat_size);
// Verify that allocations succeeded
if (h_A == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
h_A[i*width+j] = rand()/(float)RAND_MAX;
}
printf("MATRIX:\n");
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
printf("%f ",h_A[i*width+j]);
printf("\n");
}
float *h_B = (float*)malloc(mask_size);
// Verify that allocations succeeded
if (h_B == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < mask_width; ++i)
{
for(int j=0; j<mask_width; j++)
h_B[i*mask_width+j] = rand()/(float)RAND_MAX;
}
printf("MASK:\n");
for (int i = 0; i <mask_width; ++i)
{
for(int j=0; j<mask_width; j++)
printf("%f ",h_B[i*mask_width+j]);
printf("\n");
}
float *h_C = (float*)malloc(mat_size);
// Verify that allocations succeeded
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
//*************************************************************************************************
// Allocate the device input vector A
// Every function with a "cuda" prefix returns an error code which can be used to track errors
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, mat_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
// Every function with a "cuda" prefix returns an error code which can be used to track errors
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, mask_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, mat_size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
///////////////////////////////////// Operation 1 //////////////////////////////////////////////
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
err = cudaMemcpy(d_A, h_A, mat_size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, mask_size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_C, h_C, mat_size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Process Kernel 1
dim3 grid(2,2,1);
dim3 block(4,4,1);
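// note: the fixed 2x2 grid of 4x4 blocks covers an 8x8 output tile, which presumably
// matches the width/TILE_SIZE values defined in headers.h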
conv<<<grid,block>>>(d_A,d_B,d_C, mask_width, width, TILE_SIZE);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Operation completed");
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = cudaMemcpy(h_C, d_C, width*width*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\n Convolved Matrix\n");
for (int i = 0; i < width; ++i)
{
for(int j=0; j<width; j++)
printf("%f ",h_C[i*width+j]);
printf("\n");
}
printf("%f\n", h_A[0*width+1+3]*h_B[1*mask_width+0] + h_A[0*width+2+3]*h_B[1*mask_width+1] + h_A[1*width+3+3]*h_B[1*mask_width+2] + h_A[1*width+1+3]*h_B[2*mask_width+0]+ h_A[1*width+2+3]*h_B[2*mask_width+1]+ h_A[1*width+3+3]*h_B[2*mask_width+2]);
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
00537af6b3859f2d8af3b942a9e8dc110e2f1e7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <helpers/PointersManager.h>
#include <exceptions/cuda_exception.h>
#include <helpers/StringUtils.h>
#include <helpers/logger.h>
#include <memory/Workspace.h>
namespace sd {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const sd::LaunchContext* context, const std::string& funcName) {
_context = const_cast<sd::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
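// copies numberOfBytes of host data into a fresh device buffer (taken from the attached
// workspace if there is one, otherwise allocated with hipMalloc) and records the pointer
// so the destructor can release it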
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
hipError_t cudaResult = hipMalloc(reinterpret_cast<void **>(&dst), numberOfBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(sd::memory::MemoryType::DEVICE, numberOfBytes);
}
if (_context != nullptr)
hipMemcpyAsync(dst, src, numberOfBytes, hipMemcpyHostToDevice, *_context->getCudaStream());
else
hipMemcpy(dst, src, numberOfBytes, hipMemcpyHostToDevice);
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
nd4j_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p :_pOnGlobMem)
hipFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void printDevContentOnDev_(const void* pDev, const Nd4jLong len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const Nd4jLong len, const int tid) {
hipLaunchKernelGGL(( printDevContentOnDev_<T>), dim3(512), dim3(512), 1024, *sd::LaunchContext ::defaultContext()->getCudaStream(), pDev, len, tid);
auto res = hipStreamSynchronize(*sd::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
throw std::runtime_error("PointersManager::printDevContentOnDevFromHost: hipStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<Nd4jLong>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const Nd4jLong len, const int tid);
//BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, Nd4jLong len, int tid), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const Nd4jLong len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
hipMemcpyAsync(pHost, pDev, sizeof(T) * len, hipMemcpyDeviceToHost, *_context->getCudaStream());
hipError_t cudaResult = hipStreamSynchronize(*_context->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("PointersManager::printCudaHost: hipStreamSynchronize failed!");
for(Nd4jLong i = 0; i < len; ++i)
printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<Nd4jLong>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const Nd4jLong len) const;
}
| 00537af6b3859f2d8af3b942a9e8dc110e2f1e7c.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 06.02.2019
// @author [email protected]
//
#include <helpers/PointersManager.h>
#include <exceptions/cuda_exception.h>
#include <helpers/StringUtils.h>
#include <helpers/logger.h>
#include <memory/Workspace.h>
namespace sd {
//////////////////////////////////////////////////////////////////////////
PointersManager::PointersManager(const sd::LaunchContext* context, const std::string& funcName) {
_context = const_cast<sd::LaunchContext*>(context);
_funcName = funcName;
}
//////////////////////////////////////////////////////////////////////////
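// copies numberOfBytes of host data into a fresh device buffer (taken from the attached
// workspace if there is one, otherwise allocated with cudaMalloc) and records the pointer
// so the destructor can release it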
void* PointersManager::replicatePointer(const void* src, const size_t numberOfBytes) {
void* dst = nullptr;
if (_context->getWorkspace() == nullptr) {
cudaError_t cudaResult = cudaMalloc(reinterpret_cast<void **>(&dst), numberOfBytes);
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cannot allocate global memory on device!", cudaResult);
} else {
dst = _context->getWorkspace()->allocateBytes(sd::memory::MemoryType::DEVICE, numberOfBytes);
}
if (_context != nullptr)
cudaMemcpyAsync(dst, src, numberOfBytes, cudaMemcpyHostToDevice, *_context->getCudaStream());
else
cudaMemcpy(dst, src, numberOfBytes, cudaMemcpyHostToDevice);
_pOnGlobMem.emplace_back(dst);
return dst;
}
//////////////////////////////////////////////////////////////////////////
void PointersManager::synchronize() const {
if (_context != nullptr) {
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if (cudaResult != 0)
throw cuda_exception::build(_funcName + ": cuda stream synchronization failed !", cudaResult);
} else {
nd4j_printf("<%s> syncStream isn't possible: no stream set!", _funcName.c_str());
}
}
//////////////////////////////////////////////////////////////////////////
PointersManager::~PointersManager() {
for (auto& p :_pOnGlobMem)
cudaFree(p);
}
////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void printDevContentOnDev_(const void* pDev, const Nd4jLong len, const int tid) {
PointersManager::printDevContentOnDev<T>(pDev, len, tid);
}
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnDevFromHost(const void* pDev, const Nd4jLong len, const int tid) {
printDevContentOnDev_<T><<<512, 512, 1024, *sd::LaunchContext ::defaultContext()->getCudaStream()>>>(pDev, len, tid);
auto res = cudaStreamSynchronize(*sd::LaunchContext ::defaultContext()->getCudaStream());
if (res != 0)
throw std::runtime_error("PointersManager::printDevContentOnDevFromHost: cudaStreamSynchronize failed!");
}
template void PointersManager::printDevContentOnDevFromHost<Nd4jLong>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<int>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<float>(const void* pDev, const Nd4jLong len, const int tid);
template void PointersManager::printDevContentOnDevFromHost<double>(const void* pDev, const Nd4jLong len, const int tid);
//BUILD_SINGLE_TEMPLATE(template void PointersManager::printDevContentOnDevFromHost, (void* pDev, Nd4jLong len, int tid), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////
template<typename T>
void PointersManager::printDevContentOnHost(const void* pDev, const Nd4jLong len) const {
printf("host print out\n");
void* pHost = operator new(sizeof(T) * len);
cudaMemcpyAsync(pHost, pDev, sizeof(T) * len, cudaMemcpyDeviceToHost, *_context->getCudaStream());
cudaError_t cudaResult = cudaStreamSynchronize(*_context->getCudaStream());
if(cudaResult != 0)
throw std::runtime_error("PointersManager::printCudaHost: cudaStreamSynchronize failed!");
for(Nd4jLong i = 0; i < len; ++i)
printf("%f, ", (double)reinterpret_cast<T*>(pHost)[i]);
printf("\n");
operator delete(pHost);
}
template void PointersManager::printDevContentOnHost<Nd4jLong>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<int>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<float>(const void* pDev, const Nd4jLong len) const;
template void PointersManager::printDevContentOnHost<double>(const void* pDev, const Nd4jLong len) const;
}
|
fa83628219025b9f1951d7500c8a926f936c4e0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
//#include"formats.h"
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err ){
fprintf(stderr, "ERROR[CUDA]:%s{%s}.\n", msg, hipGetErrorString( err ) );
exit(EXIT_FAILURE);
}
}
//Copy RGB data from shared memory region..
inline void copy_shmrgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated through cuMemAlloc()
int rgbleft,int rgbtop,
int rgbwidth,int rgbheight,
int width,int height)
{
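// the image is 4 bytes per pixel (BGRX), hence the <<2 shifts; the dirty rectangle is
// copied to the device one row at a time so only rgbwidth*4 bytes per line are transferred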
int offset=(rgbtop*width)<<2;
int offset_left=rgbleft<<2;
int line_siz=width<<2;
int h=0;
for(h=rgbtop;h<rgbheight+rgbtop;h++){
hipMemcpy(devmem+offset+offset_left,rgbs+offset+offset_left,rgbwidth<<2,hipMemcpyHostToDevice);
offset+=line_siz;
}
}
//for TEST ONLY,
inline void copy_caprgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated through cuMemAlloc()
int patch_left,int patch_top,
int patch_width,int patch_height,
int width,int height)
{
int rgb_offset=0;
int offset=(patch_top*width)<<2;
int offset_left=patch_left<<2;
int line_siz=width<<2;
int h;
for(h=0;h<patch_height;h++){
hipMemcpy(devmem+offset+offset_left,rgbs+rgb_offset,patch_width<<2,hipMemcpyHostToDevice);
offset+=line_siz;
rgb_offset+=(patch_width<<2);
}
}
__global__ void
convert_line_rgb_to_nv12(unsigned char*devrgb,int rgbstride,/*device mem*/
unsigned char*oyuv,int ostride,int ovstride,/*device mem*/
int width,int left,int top)
{
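// one thread handles one image row: every row writes its luma samples, while even rows
// additionally write the U samples and odd rows the V samples of the interleaved NV12
// chroma plane (U at even columns, V at the following odd column)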
int curline=threadIdx.x;
unsigned char*rgb_p=devrgb+(curline+top)*rgbstride*4;
unsigned char*luma_p=oyuv+(curline+top)*ostride;
unsigned char*chroma_p=oyuv+(ovstride*ostride)+((curline+top)>>1)*ostride;
int r,g,b;
int y,u,v;
int j;
if(curline%2==0){
//even line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
}
}else{
//odd line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
//FIXME: the chroma copy below never advances nv12_chroma_p with j and reads U/V with the luma stride
__global__ void
convert_line_yv12_to_nv12(unsigned char*pdev,int istride,
unsigned char*oyuv,int ostride,
int width,int height)
{
int curline=threadIdx.x;
int yv12_luma_siz = istride*height;
int yv12_chroma_siz = yv12_luma_siz>>2;
int curpos=curline*istride;
unsigned char*yv12_luma_p=pdev+curpos;
unsigned char*yv12_v_p=pdev+yv12_luma_siz+(curpos>>1);
unsigned char*yv12_u_p=pdev+yv12_luma_siz+yv12_chroma_siz+(curpos>>1);
curpos=curline*ostride;
unsigned char*nv12_luma_p=oyuv+curpos;
unsigned char*nv12_chroma_p=oyuv+(height*ostride)+(curpos>>1);
char val;
int j;
for(j=0;j<width;j++){
val=*(yv12_luma_p+j);
*(nv12_luma_p+j)=val;
val=*(yv12_u_p+j);
*(nv12_chroma_p)=val;
val=*(yv12_v_p+j);
*(nv12_chroma_p+1)=val;
}
}
extern "C" void load_rgb_bgrx_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devrgb,/*device */
unsigned char*rgb, /*input data host*/
int left,int top,int width,int height,//rgb patch rect
int rgbwidth,int rgbheight,//rgb data size
int ostride //yuv data height<pixel>
)
{
//Copy data from shared memory to the device
#if 1
// Read rects from shm region.
copy_shmrgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated through cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#else
//for TEST :read rects from capture file.
copy_caprgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated through cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#endif
int ovstride=rgbheight;
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
hipLaunchKernelGGL(( convert_line_rgb_to_nv12), dim3(1),dim3(height), 0, 0, devrgb,rgbwidth,
oyuv,ostride,ovstride,
width,left,top);
hipDeviceSynchronize();
checkCUDAError("Convert BGRA to NV12\n");
}
extern "C" void load_yuv_yv12_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devyv12,/*device */
unsigned char*iyuv, /*input data host*/
int width,int height,/*real size*/
int istride,int ostride
)
{
// Load yv12 to device buffer
//TODO: devyv12 is device memory, so these plain memcpy() calls would need to be hipMemcpy() host-to-device copies
int in_luma_siz=istride*height;
int out_luma_siz=ostride*height;
int in_chroma_siz=in_luma_siz>>2;
int out_chroma_siz=out_luma_siz>>2;
unsigned char*in_luma_p=iyuv;
unsigned char*out_luma_p=devyv12;
unsigned char*in_v_p=iyuv+in_luma_siz;
unsigned char*out_v_p=devyv12+out_luma_siz;
unsigned char*in_u_p=iyuv+in_luma_siz+in_chroma_siz;
unsigned char*out_u_p=devyv12+out_luma_siz+out_chroma_siz;
int j;
for(j=0;j<height;j++){
//y
memcpy(out_luma_p+j*ostride,in_luma_p+j*istride,width);
}
for(j=0;j<(height>>1);j++){
//v
memcpy(out_v_p+((j*ostride)>>1),in_v_p+((j*istride)>>1),width>>1);
//u
memcpy(out_u_p+((j*ostride)>>1),in_u_p+((j*istride)>>1),width>>1);
}
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
hipLaunchKernelGGL(( convert_line_yv12_to_nv12), dim3(1),dim3(height), 0, 0, devyv12,istride,
oyuv,ostride,
width,height);
hipDeviceSynchronize();
checkCUDAError("Convert YV12 to NV12\n");
}
/***************************************************/
/***************************************************/
/***************************************************/
/***************************************************/
extern"C"{
inline void rgb2yuv_pixel(
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char*y,
unsigned char*u,
unsigned char*v
){
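// two coefficient sets: the disabled #if 0 branch is the offset (video-range) YCbCr form,
// the active branch uses the classic 0.299/0.587/0.114 luma weights with chroma biased by +128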
#if 0
//YCbCr
*y=(0.257*r)+(0.504*g)+(0.098*b)+16;
*u=-(0.148 * r) - (0.291 * g) + (0.439 * b) + 128;
*v=(0.439*r)-(0.368*g)+(0.071*b)+128;
#else
//YUV, Intel IPP's BT.709
*y= 0.299*r + 0.587*g + 0.114*b;
*u= -0.169*r - 0.331*g + 0.5*b+128;
*v= 0.5*r - 0.419*g - 0.081*b+128;
#endif
}
/*For Test*/
void load_rgb_bgrx_(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
int luma_off=ostride*rgbheight;
unsigned char*luma_p;
unsigned char*chroma_p;
unsigned char*rgb_p;
int r,g,b;
int y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=top;i<height+top;i++){
//rows
rgb_p=rgb+width*(i-top)*4;
luma_p=yuv+ostride*i;
chroma_p=yuv+luma_off+ostride*(i/2);
for(j=left;j<width+left;j++){
b=*(rgb_p+(j-left)*4);
g=*(rgb_p+(j-left)*4+1);
r=*(rgb_p+(j-left)*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(i%2==0 && j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
if(i%2==1 && j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
void load_rgb_bgrx_2(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
int luma_off=ostride*rgbheight;
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au=0;//(u1+u2+u3+u4)/4
int av=0;//
unsigned char r,g,b;
unsigned char y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d }\n",left,top,width,height);//,stride,vstride);
int i,j;
for(i=top;i<height+top;i+=2){
//rows
rgb_p0=rgb+width*(i-top)*4;
rgb_p1=rgb+width*(i-top+1)*4;
luma_p0=yuv+ostride*i;
luma_p1=yuv+ostride*(i+1);
chroma_p=yuv+luma_off+ostride*(i/2);
for(j=left;j<width+left;j++){
b=*(rgb_p0+(j-left)*4);
g=*(rgb_p0+(j-left)*4+1);
r=*(rgb_p0+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au+=u;
av+=v;
///////////
b=*(rgb_p1+(j-left)*4);
g=*(rgb_p1+(j-left)*4+1);
r=*(rgb_p1+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
au+=u;
av+=v;
if(j%2==0){
*(chroma_p+j)=(au>>2)&0xff;
*(chroma_p+j+1)=(av>>2)&0xff;
av=au=0;
}
}
}
}
/*
void load_rgb_bgrx(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
*/
void load_rgb_bgrx(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
//assert left top width height are even;
//
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
int luma_off=dstride*height;
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au;//(u1+u2+u3+u4)/4
int av;//
unsigned char r,g,b;
unsigned char y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=ptop;i<pheight+ptop;i+=2){
//rows
rgb_p0=bgrx+sstride*(i)*4;
rgb_p1=bgrx+sstride*(i+1)*4;
luma_p0=nv12+dstride*i;
luma_p1=nv12+dstride*(i+1);
chroma_p=nv12+luma_off+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p0+j*4);
g=*(rgb_p0+j*4+1);
r=*(rgb_p0+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au=u;
// av=v;
///////////
b=*(rgb_p1+j*4);
g=*(rgb_p1+j*4+1);
r=*(rgb_p1+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
// au+=u;
av=v;
if(j%2==0){
*(chroma_p+j)=au&0xff;
*(chroma_p+j+1)=av&0xff;
// av=au=0;
}
}
}
}
#if 0
void load_rgb_bgrx__(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
unsigned char*luma_p=nv12;
unsigned char*chroma_p;
unsigned char*rgb_p=bgrx;
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
chroma_p=luma_p+dstride*height;
unsigned char b,g,r;
unsigned char y,u,v;
int i,j;
for(i=ptop;i<pheight;i+=2){//vertical
//==============
rgb_p=bgrx+i*sstride*4;
luma_p=nv12+dstride*i;
chroma_p=nv12+dstride+height+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
// }
}
//odd line
rgb_p+=sstride*4;
luma_p+=dstride;
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
// }
}
// }
}
}
#endif
void load_yuv_yv12(unsigned char*yv12,unsigned char*nv12,int width,int height,int sstride,int dstride)
{
unsigned char*nv12_luma=nv12;
unsigned char*nv12_chroma;
unsigned char*yv12_luma=yv12;
unsigned char*yv12_v;
unsigned char*yv12_u;
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
nv12_chroma=nv12_luma+dstride*height;
yv12_v=yv12_luma+sstride*height;
yv12_u=yv12_v+sstride*height/4;
int y;
int x;
for (y = 0 ; y < height ; y++){
memcpy(nv12_luma + (dstride*y), yv12_luma + (sstride*y) , width);
}
for (y = 0 ; y < height/2 ; y++){
for (x= 0 ; x < width; x=x+2){
nv12_chroma[(y*dstride) + x] = yv12_v[((sstride/2)*y) + (x >>1)];
nv12_chroma[(y*dstride) +(x+1)] = yv12_u[((sstride/2)*y) + (x >>1)];
}
}
}
void load_yuv_nv12(unsigned char*inyuv, unsigned char*outyuv,int width,int height,int istride,int ostride)
{
if(istride==0)
istride=width;
if(ostride==0)
ostride=width;
unsigned char*inyuv_chroma=inyuv+width*istride;
unsigned char*outyuv_chroma=outyuv+width*ostride;
int y;
for(y=0;y<height;y++){
memcpy(outyuv+y*ostride,inyuv+y*istride,width);
}
for(y=0;y<height/2;y++){
memcpy(outyuv_chroma+y*ostride/2,inyuv_chroma+y*istride/2,width/2);
}
}
}//extern "C"
| fa83628219025b9f1951d7500c8a926f936c4e0d.cu |
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
//#include"formats.h"
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err ){
fprintf(stderr, "ERROR[CUDA]:%s{%s}.\n", msg, cudaGetErrorString( err ) );
exit(EXIT_FAILURE);
}
}
//Copy RGB data from shared memory region..
inline void copy_shmrgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated through cuMemAlloc()
int rgbleft,int rgbtop,
int rgbwidth,int rgbheight,
int width,int height)
{
int offset=(rgbtop*width)<<2;
int offset_left=rgbleft<<2;
int line_siz=width<<2;
int h=0;
for(h=rgbtop;h<rgbheight+rgbtop;h++){
cudaMemcpy(devmem+offset+offset_left,rgbs+offset+offset_left,rgbwidth<<2,cudaMemcpyHostToDevice);
offset+=line_siz;
}
}
//for TEST ONLY,
inline void copy_caprgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated through cuMemAlloc()
int patch_left,int patch_top,
int patch_width,int patch_height,
int width,int height)
{
int rgb_offset=0;
int offset=(patch_top*width)<<2;
int offset_left=patch_left<<2;
int line_siz=width<<2;
int h;
for(h=0;h<patch_height;h++){
cudaMemcpy(devmem+offset+offset_left,rgbs+rgb_offset,patch_width<<2,cudaMemcpyHostToDevice);
offset+=line_siz;
rgb_offset+=(patch_width<<2);
}
}
__global__ void
convert_line_rgb_to_nv12(unsigned char*devrgb,int rgbstride,/*device mem*/
unsigned char*oyuv,int ostride,int ovstride,/*device mem*/
int width,int left,int top)
{
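// one thread handles one image row: every row writes its luma samples, while even rows
// additionally write the U samples and odd rows the V samples of the interleaved NV12
// chroma plane (U at even columns, V at the following odd column)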
int curline=threadIdx.x;
unsigned char*rgb_p=devrgb+(curline+top)*rgbstride*4;
unsigned char*luma_p=oyuv+(curline+top)*ostride;
unsigned char*chroma_p=oyuv+(ovstride*ostride)+((curline+top)>>1)*ostride;
int r,g,b;
int y,u,v;
int j;
if(curline%2==0){
//even line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
}
}else{
//odd line
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
//FIXME: the chroma copy below never advances nv12_chroma_p with j and reads U/V with the luma stride
__global__ void
convert_line_yv12_to_nv12(unsigned char*pdev,int istride,
unsigned char*oyuv,int ostride,
int width,int height)
{
int curline=threadIdx.x;
int yv12_luma_siz = istride*height;
int yv12_chroma_siz = yv12_luma_siz>>2;
int curpos=curline*istride;
unsigned char*yv12_luma_p=pdev+curpos;
unsigned char*yv12_v_p=pdev+yv12_luma_siz+(curpos>>1);
unsigned char*yv12_u_p=pdev+yv12_luma_siz+yv12_chroma_siz+(curpos>>1);
curpos=curline*ostride;
unsigned char*nv12_luma_p=oyuv+curpos;
unsigned char*nv12_chroma_p=oyuv+(height*ostride)+(curpos>>1);
char val;
int j;
for(j=0;j<width;j++){
val=*(yv12_luma_p+j);
*(nv12_luma_p+j)=val;
val=*(yv12_u_p+j);
*(nv12_chroma_p)=val;
val=*(yv12_v_p+j);
*(nv12_chroma_p+1)=val;
}
}
extern "C" void load_rgb_bgrx_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devrgb,/*device */
unsigned char*rgb, /*input data host*/
int left,int top,int width,int height,//rgb patch rect
int rgbwidth,int rgbheight,//rgb data size
int ostride //yuv data height<pixel>
)
{
//Copy data from shared memory to the device
#if 1
// Read rects from shm region.
copy_shmrgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated through cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#else
//for TEST :read rects from capture file.
copy_caprgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated through cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#endif
int ovstride=rgbheight;
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
convert_line_rgb_to_nv12<<<1,height>>>(devrgb,rgbwidth,
oyuv,ostride,ovstride,
width,left,top);
cudaThreadSynchronize();
checkCUDAError("Convert BGRA to NV12\n");
}
extern "C" void load_yuv_yv12_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devyv12,/*device */
unsigned char*iyuv, /*input data host*/
int width,int height,/*real size*/
int istride,int ostride
)
{
// Load yv12 to device buffer
//TODO: devyv12 is device memory, so these plain memcpy() calls would need to be cudaMemcpy() host-to-device copies
int in_luma_siz=istride*height;
int out_luma_siz=ostride*height;
int in_chroma_siz=in_luma_siz>>2;
int out_chroma_siz=out_luma_siz>>2;
unsigned char*in_luma_p=iyuv;
unsigned char*out_luma_p=devyv12;
unsigned char*in_v_p=iyuv+in_luma_siz;
unsigned char*out_v_p=devyv12+out_luma_siz;
unsigned char*in_u_p=iyuv+in_luma_siz+in_chroma_siz;
unsigned char*out_u_p=devyv12+out_luma_siz+out_chroma_siz;
int j;
for(j=0;j<height;j++){
//y
memcpy(out_luma_p+j*ostride,in_luma_p+j*istride,width);
}
for(j=0;j<(height>>1);j++){
//v
memcpy(out_v_p+((j*ostride)>>1),in_v_p+((j*istride)>>1),width>>1);
//u
memcpy(out_u_p+((j*ostride)>>1),in_u_p+((j*istride)>>1),width>>1);
}
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
convert_line_yv12_to_nv12<<<1,height>>>(devyv12,istride,
oyuv,ostride,
width,height);
cudaThreadSynchronize();
checkCUDAError("Convert YV12 to NV12\n");
}
/***************************************************/
/***************************************************/
/***************************************************/
/***************************************************/
extern"C"{
inline void rgb2yuv_pixel(
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char*y,
unsigned char*u,
unsigned char*v
){
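// two coefficient sets: the disabled #if 0 branch is the offset (video-range) YCbCr form,
// the active branch uses the classic 0.299/0.587/0.114 luma weights with chroma biased by +128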
#if 0
//YCbCr
*y=(0.257*r)+(0.504*g)+(0.098*b)+16;
*u=-(0.148 * r) - (0.291 * g) + (0.439 * b) + 128;
*v=(0.439*r)-(0.368*g)+(0.071*b)+128;
#else
//YUV, Intel IPP's BT.709
*y= 0.299*r + 0.587*g + 0.114*b;
*u= -0.169*r - 0.331*g + 0.5*b+128;
*v= 0.5*r - 0.419*g - 0.081*b+128;
#endif
}
/*For Test*/
void load_rgb_bgrx_(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
int luma_off=ostride*rgbheight;
unsigned char*luma_p;
unsigned char*chroma_p;
unsigned char*rgb_p;
int r,g,b;
int y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=top;i<height+top;i++){
//rows
rgb_p=rgb+width*(i-top)*4;
luma_p=yuv+ostride*i;
chroma_p=yuv+luma_off+ostride*(i/2);
for(j=left;j<width+left;j++){
b=*(rgb_p+(j-left)*4);
g=*(rgb_p+(j-left)*4+1);
r=*(rgb_p+(j-left)*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(i%2==0 && j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
if(i%2==1 && j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
void load_rgb_bgrx_2(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
int luma_off=ostride*rgbheight;
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au=0;//(u1+u2+u3+u4)/4
int av=0;//
unsigned char r,g,b;
unsigned char y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d }\n",left,top,width,height);//,stride,vstride);
int i,j;
for(i=top;i<height+top;i+=2){
//rows
rgb_p0=rgb+width*(i-top)*4;
rgb_p1=rgb+width*(i-top+1)*4;
luma_p0=yuv+ostride*i;
luma_p1=yuv+ostride*(i+1);
chroma_p=yuv+luma_off+ostride*(i/2);
for(j=left;j<width+left;j++){
b=*(rgb_p0+(j-left)*4);
g=*(rgb_p0+(j-left)*4+1);
r=*(rgb_p0+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au+=u;
av+=v;
///////////
b=*(rgb_p1+(j-left)*4);
g=*(rgb_p1+(j-left)*4+1);
r=*(rgb_p1+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
au+=u;
av+=v;
if(j%2==0){
*(chroma_p+j)=(au>>2)&0xff;
*(chroma_p+j+1)=(av>>2)&0xff;
av=au=0;
}
}
}
}
/*
void load_rgb_bgrx(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
*/
void load_rgb_bgrx(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
//assert left top width height are even;
//
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
int luma_off=dstride*height;
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au;//(u1+u2+u3+u4)/4
int av;//
unsigned char r,g,b;
unsigned char y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=ptop;i<pheight+ptop;i+=2){
//rows
rgb_p0=bgrx+sstride*(i)*4;
rgb_p1=bgrx+sstride*(i+1)*4;
luma_p0=nv12+dstride*i;
luma_p1=nv12+dstride*(i+1);
chroma_p=nv12+luma_off+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p0+j*4);
g=*(rgb_p0+j*4+1);
r=*(rgb_p0+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au=u;
// av=v;
///////////
b=*(rgb_p1+j*4);
g=*(rgb_p1+j*4+1);
r=*(rgb_p1+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
// au+=u;
av=v;
if(j%2==0){
*(chroma_p+j)=au&0xff;
*(chroma_p+j+1)=av&0xff;
// av=au=0;
}
}
}
}
#if 0
void load_rgb_bgrx__(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
unsigned char*luma_p=nv12;
unsigned char*chroma_p;
unsigned char*rgb_p=bgrx;
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
chroma_p=luma_p+dstride*height;
unsigned char b,g,r;
unsigned char y,u,v;
int i,j;
for(i=ptop;i<pheight;i+=2){//vertical
//==============
rgb_p=bgrx+i*sstride*4;
luma_p=nv12+dstride*i;
chroma_p=nv12+dstride+height+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
// }
}
//odd line
rgb_p+=sstride*4;
luma_p+=dstride;
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
// }
}
// }
}
}
#endif
void load_yuv_yv12(unsigned char*yv12,unsigned char*nv12,int width,int height,int sstride,int dstride)
{
unsigned char*nv12_luma=nv12;
unsigned char*nv12_chroma;
unsigned char*yv12_luma=yv12;
unsigned char*yv12_v;
unsigned char*yv12_u;
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
nv12_chroma=nv12_luma+dstride*height;
yv12_v=yv12_luma+sstride*height;
yv12_u=yv12_v+sstride*height/4;
int y;
int x;
for (y = 0 ; y < height ; y++){
memcpy(nv12_luma + (dstride*y), yv12_luma + (sstride*y) , width);
}
for (y = 0 ; y < height/2 ; y++){
for (x= 0 ; x < width; x=x+2){
nv12_chroma[(y*dstride) + x] = yv12_v[((sstride/2)*y) + (x >>1)];
nv12_chroma[(y*dstride) +(x+1)] = yv12_u[((sstride/2)*y) + (x >>1)];
}
}
}
void load_yuv_nv12(unsigned char*inyuv, unsigned char*outyuv,int width,int height,int istride,int ostride)
{
if(istride==0)
istride=width;
if(ostride==0)
ostride=width;
unsigned char*inyuv_chroma=inyuv+width*istride;
unsigned char*outyuv_chroma=outyuv+width*ostride;
int y;
for(y=0;y<height;y++){
memcpy(outyuv+y*ostride,inyuv+y*istride,width);
}
for(y=0;y<height/2;y++){
memcpy(outyuv_chroma+y*ostride/2,inyuv_chroma+y*istride/2,width/2);
}
}
}//extern "C"
|
4673652ac7e0de9efb446dc4b566f9e84042aea8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* kernel routine starts with keyword __global__ */
__global__ void vecadd(float* A, float* B, float* C)
{
int i = threadIdx.x; // threadIdx is a CUDA built-in variable
C[i] = sqrt(sin(cos(A[i]))) + sqrt(sin(cos(B[i])));
}
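/* note: despite the name, each thread computes sqrt(sin(cos(x))) of both inputs and sums
   them; the transcendental calls are just there to give every thread some arithmetic work */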
int main(int argc, char * argv[])
{
float *host_A, *host_B, *host_C;
float *dev_A, *dev_B, *dev_C;
int n;
if (argc == 1) n = 1024;
else n = atoi(argv[1]);
/* 1. allocate host memory */
host_A = (float*)malloc( n*sizeof(float) );
host_B = (float*)malloc( n*sizeof(float) );
host_C = (float*)malloc( n*sizeof(float) );
/* 2. allocate GPU memory */
hipMalloc( &dev_A, n*sizeof(float) );
hipMalloc( &dev_B, n*sizeof(float) );
hipMalloc( &dev_C, n*sizeof(float) );
/* initialize array A and B */
for( int i = 0; i < n; ++i ) {
host_A[i] = (float) i;
host_B[i] = (float) i;
}
/* 3. Copy data (host_A and host_B) to GPU */
hipMemcpy( dev_A, host_A, n*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( dev_B, host_B, n*sizeof(float), hipMemcpyHostToDevice );
/* 4. call kernel routine to execute on GPU */
/* launch 1 thread per vector-element, 1024 threads per block */
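/* note: a single block is limited to 1024 threads on current GPUs, so n > 1024 would
   need a multi-block launch, e.g. a grid of (n+1023)/1024 blocks of 1024 threads */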
hipLaunchKernelGGL(( vecadd), dim3(1),dim3(n), 0, 0, dev_A, dev_B, dev_C );
/* transfer results from GPU (dev_C) to CPU (host_C) */
hipMemcpy( host_C, dev_C, n*sizeof(float), hipMemcpyDeviceToHost );
/* free host and GPU memory */
free(host_A);
free(host_B);
free(host_C);
hipFree(dev_A);
hipFree(dev_B);
hipFree(dev_C);
return( 0 );
}
| 4673652ac7e0de9efb446dc4b566f9e84042aea8.cu | /* kernel routine starts with keyword __global__ */
__global__ void vecadd(float* A, float* B, float* C)
{
int i = threadIdx.x; // threadIdx is a CUDA built-in variable
C[i] = sqrt(sin(cos(A[i]))) + sqrt(sin(cos(B[i])));
}
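/* note: despite the name, each thread computes sqrt(sin(cos(x))) of both inputs and sums
   them; the transcendental calls are just there to give every thread some arithmetic work */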
int main(int argc, char * argv[])
{
float *host_A, *host_B, *host_C;
float *dev_A, *dev_B, *dev_C;
int n;
if (argc == 1) n = 1024;
else n = atoi(argv[1]);
/* 1. allocate host memory */
host_A = (float*)malloc( n*sizeof(float) );
host_B = (float*)malloc( n*sizeof(float) );
host_C = (float*)malloc( n*sizeof(float) );
/* 2. allocate GPU memory */
cudaMalloc( &dev_A, n*sizeof(float) );
cudaMalloc( &dev_B, n*sizeof(float) );
cudaMalloc( &dev_C, n*sizeof(float) );
/* initialize array A and B */
for( int i = 0; i < n; ++i ) {
host_A[i] = (float) i;
host_B[i] = (float) i;
}
/* 3. Copy data (host_A and host_B) to GPU */
cudaMemcpy( dev_A, host_A, n*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( dev_B, host_B, n*sizeof(float), cudaMemcpyHostToDevice );
/* 4. call kernel routine to execute on GPU */
/* launch 1 thread per vector-element, 1024 threads per block */
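/* note: a single block is limited to 1024 threads on current GPUs, so n > 1024 would
   need a multi-block launch, e.g. vecadd<<<(n+1023)/1024, 1024>>>( dev_A, dev_B, dev_C ) */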
vecadd<<<1,n>>>( dev_A, dev_B, dev_C );
/* transfer results from GPU (dev_C) to CPU (host_C) */
cudaMemcpy( host_C, dev_C, n*sizeof(float), cudaMemcpyDeviceToHost );
/* free host and GPU memory */
free(host_A);
free(host_B);
free(host_C);
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
return( 0 );
}
|
61da732e8ba33330116b556b8564d07ac2ad518e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
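// spatial_scale maps the ROI coordinates, given in input-image pixels, onto the coarser
// feature-map grid that bottom_data lives on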
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
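      // Offset into this ROI's (and this channel's) slice of top_diff and argmax_data.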
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
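      // Invert the forward binning: only pooled cells in [phstart, phend) x [pwstart, pwend)
      // have bins that can contain (h, w); clamp the range to the pooled grid below.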
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
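  // One thread per bottom element; each thread accumulates gradients from every ROI that pooled it.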
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
| 61da732e8ba33330116b556b8564d07ac2ad518e.cu | // ------------------------------------------------------------------
// Fast R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Ross Girshick
// ------------------------------------------------------------------
#include <cfloat>
#include "caffe/layers/roi_pooling_layer.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe
|