hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M) |
---|---|---|---|
e7152101f56b393d2db37bff8b0bf64e48f31614.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
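/* 3D 7-point stencil (j3d7pt) with temporal blocking: four time steps are fused and
   streamed along the Z axis. For each stage s in 0..3, tilevar[s] caches the current
   XY plane in shared memory while t[s] and b[s] keep the planes above and below in
   registers; the result of stage s feeds the register pipeline of stage s+1, and
   stage 3 writes plane z-3 of the output. Blocks overlap by 8 cells in X and Y
   because each fused step consumes a one-cell halo on every side. */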
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4];
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8);
for (int __iter_0__=0; __iter_0__<N-1; __iter_0__+=FORMA_BLOCKDIM_X-8) {
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*__iter_4__];
t[0] = input[__iter_5__+N*(__iter_4__+M)];
}
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b[0] = tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[0];
t[0] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) {
float __temp_a3__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t[0]);
float __temp_a28__ = (__temp_a23__ + 0.166f * b[0]);
float __temp_a32__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b[1] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[1];
t[1] = __temp_a33__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) {
float __temp_a50__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t[1]);
float __temp_a75__ = (__temp_a70__ + 0.166f * b[1]);
float __temp_a79__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b[2] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[2];
t[2] = __temp_a80__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) {
float __temp_a94__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t[2]);
float __temp_a104__ = (__temp_a102__ + 0.166f * b[2]);
float __temp_a105__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b[3] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[3];
t[3] = __temp_a106__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))) {
float __temp_a120__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t[3]);
float __temp_a130__ = (__temp_a128__ + 0.166f * b[3]);
float __temp_a131__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = 1;
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
}
/*Host Free End*/
| e7152101f56b393d2db37bff8b0bf64e48f31614.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
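/* 3D 7-point stencil (j3d7pt) with temporal blocking: four time steps are fused and
   streamed along the Z axis. For each stage s in 0..3, tilevar[s] caches the current
   XY plane in shared memory while t[s] and b[s] keep the planes above and below in
   registers; the result of stage s feeds the register pipeline of stage s+1, and
   stage 3 writes plane z-3 of the output. Blocks overlap by 8 cells in X and Y
   because each fused step consumes a one-cell halo on every side. */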
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4];
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)-8);
for (int __iter_0__=0; __iter_0__<N-1; __iter_0__+=FORMA_BLOCKDIM_X-8) {
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))) {
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*__iter_4__];
t[0] = input[__iter_5__+N*(__iter_4__+M)];
}
// Rest of the computation
for (int __iter_2__ = 1; __iter_2__ < L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
b[0] = tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[0];
t[0] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__+1))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) {
float __temp_a3__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a7__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a8__ = (0.161f * __temp_a3__ + 0.162f * __temp_a7__);
float __temp_a12__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a13__ = (__temp_a8__ + 0.163f * __temp_a12__);
float __temp_a17__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a18__ = (__temp_a13__ + 0.164f * __temp_a17__);
float __temp_a23__ = (__temp_a18__ + 0.165f * t[0]);
float __temp_a28__ = (__temp_a23__ + 0.166f * b[0]);
float __temp_a32__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a33__ = (__temp_a28__ - 1.670f * __temp_a32__);
b[1] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[1];
t[1] = __temp_a33__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) {
float __temp_a50__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a54__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a55__ = (0.161f * __temp_a50__ + 0.162f * __temp_a54__);
float __temp_a59__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a60__ = (__temp_a55__ + 0.163f * __temp_a59__);
float __temp_a64__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a65__ = (__temp_a60__ + 0.164f * __temp_a64__);
float __temp_a70__ = (__temp_a65__ + 0.165f * t[1]);
float __temp_a75__ = (__temp_a70__ + 0.166f * b[1]);
float __temp_a79__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a80__ = (__temp_a75__ - 1.670f * __temp_a79__);
b[2] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[2];
t[2] = __temp_a80__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) {
float __temp_a94__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a95__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a96__ = (0.161f * __temp_a94__ + 0.162f * __temp_a95__);
float __temp_a97__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a98__ = (__temp_a96__ + 0.163f * __temp_a97__);
float __temp_a99__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a100__ = (__temp_a98__ + 0.164f * __temp_a99__);
float __temp_a102__ = (__temp_a100__ + 0.165f * t[2]);
float __temp_a104__ = (__temp_a102__ + 0.166f * b[2]);
float __temp_a105__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a106__ = (__temp_a104__ - 1.670f * __temp_a105__);
b[3] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = t[3];
t[3] = __temp_a106__;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))) {
float __temp_a120__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a121__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a122__ = (0.161f * __temp_a120__ + 0.162f * __temp_a121__);
float __temp_a123__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_a124__ = (__temp_a122__ + 0.163f * __temp_a123__);
float __temp_a125__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_a126__ = (__temp_a124__ + 0.164f * __temp_a125__);
float __temp_a128__ = (__temp_a126__ + 0.165f * t[3]);
float __temp_a130__ = (__temp_a128__ + 0.166f * b[3]);
float __temp_a131__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_a132__ = (__temp_a130__ - 1.670f * __temp_a131__);
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-3,0))] = __temp_a132__;
}
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d7pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = 1;
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y-8);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
}
/*Host Free End*/
|
ecb353c126c2b8c3a4889385e13b85233d814782.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define SIZEOFINT sizeof(int)
const int INF = ((1 << 30) - 1);
const dim3 block_dim(4, 32);
const int B = 32;
const int Share_Mem_Size = 64;
int n, m;
int *Dist;
int *Dist_cuda;
void show_mat(int *start_p, int vertex_num){
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(start_p[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", start_p[i * vertex_num + j]);
}
}
printf("\n");
}
}
void malloc_Dist(int vertex_num){Dist = (int*)malloc(SIZEOFINT * vertex_num * vertex_num);}
int getDist(int i, int j, int vertex_num){return Dist[i * vertex_num + j];}
int *getDistAddr(int i, int j, int vertex_num){return &(Dist[i * vertex_num + j]);}
void setDist(int i, int j, int val, int vertex_num){Dist[i * vertex_num + j] = val;}
void setup_DistCuda(int vertex_num){
hipMalloc((void **)&Dist_cuda, SIZEOFINT * vertex_num * vertex_num);
hipMemcpy(Dist_cuda, Dist, (n * n * SIZEOFINT), hipMemcpyHostToDevice);
}
void back_DistCuda(int vertex_num){
hipMemcpy(Dist, Dist_cuda, (n * n * SIZEOFINT), hipMemcpyDeviceToHost);
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
malloc_Dist(n);
// malloc_DistCuda(n);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i == j) {
setDist(i, j, 0, n);
// Dist[i][j] = 0;
} else {
setDist(i, j, INF, n);
// Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; i++) {
fread(pair, sizeof(int), 3, file);
setDist(pair[0], pair[1], pair[2], n);
// Dist[pair[0]][pair[1]] = pair[2];
}
// hipMemcpy(Dist_cuda, Dist, (n * n * SIZEOFINT), hipMemcpyHostToDevice);
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
// if (Dist[i][j] >= INF) Dist[i][j] = INF;
if (getDist(i, j, n) >= INF) setDist(i, j, INF, n);
}
// fwrite(Dist[i], sizeof(int), n, outfile);
// fwrite(getDistAddr(i, 0, n), sizeof(int), n, outfile);
}
fwrite(getDistAddr(0, 0, n), sizeof(int), n * n, outfile);
fclose(outfile);
}
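// relax: for every k in the current pivot block, update the cached tile with
// AM[i][j] = min(AM[i][j], BM[i][k] + CM[k][j]); this is the Floyd-Warshall
// relaxation step performed entirely in shared memory.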
__device__ void relax(int (*AM)[Share_Mem_Size][Share_Mem_Size], int (*BM)[Share_Mem_Size][Share_Mem_Size], int (*CM)[Share_Mem_Size][Share_Mem_Size], int vertex_num, int Round, int block_internal_start_x, int block_internal_end_x, int block_internal_start_y, int block_internal_end_y){
// Relax Path
for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
int d = (*BM)[i - block_internal_start_x][k - Round * B] + (*CM)[k - Round * B][j - block_internal_start_y];
// __syncthreads();
if (d < (*AM)[i - block_internal_start_x][j - block_internal_start_y]) {
(*AM)[i - block_internal_start_x][j - block_internal_start_y] = d;
// dist[i * vertex_num + j] = d;
}
}
}
__syncthreads();
}
}
__device__ void flush(int *dist, int (*AM)[Share_Mem_Size][Share_Mem_Size], int vertex_num, int block_internal_start_x, int block_internal_end_x, int block_internal_start_y, int block_internal_end_y){
// Move modified block to global memory
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
dist[i * vertex_num + j] = (*AM)[i - block_internal_start_x][j - block_internal_start_y];
}
}
}
__global__ void phase3_cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
const int Share_Mem_Row_Size = B;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
__shared__ int a[Share_Mem_Size * Share_Mem_Size];
// i-k block
__shared__ int b[Share_Mem_Size * Share_Mem_Size];
// k-j block
__shared__ int c[Share_Mem_Size * Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)] = dist[i * vertex_num + j];
}
}
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[(k - Round * B) * Share_Mem_Row_Size + (j - block_internal_start_y)] = dist[k * vertex_num + j];
}
}
// Reverse the row and column to ensure column-major iteration
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[(k - Round * B) * Share_Mem_Row_Size + (i - block_internal_start_x)] = dist[i * vertex_num + k];
}
}
__syncthreads();
// Relax Path
for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
int d = b[(k - Round * B) * Share_Mem_Row_Size + (i - block_internal_start_x)] + c[(k - Round * B) * Share_Mem_Row_Size + (j - block_internal_start_y)];
if (d < a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)]) {
a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)] = d;
}
}
}
__syncthreads();
}
// Move modified block to global memory
// flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
dist[i * vertex_num + j] = a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)];
}
}
}
}
}
__global__ void phase2_cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
// const int Share_Mem_Size = 64;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
int (*AM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int a[Share_Mem_Size][Share_Mem_Size];
// i-k block
int (*BM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int b[Share_Mem_Size][Share_Mem_Size];
// k-j block
int (*CM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int c[Share_Mem_Size][Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
AM = &a;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[i - block_internal_start_x][j - block_internal_start_y] = dist[i * vertex_num + j];
}
}
CM = &c;
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[k - Round * B][j - block_internal_start_y] = dist[k * vertex_num + j];
}
}
BM = &b;
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[i - block_internal_start_x][k - Round * B] = dist[i * vertex_num + k];
}
}
__syncthreads();
// Relax Path
relax(AM, BM, CM, vertex_num, Round, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
// Move modified block to global memory
flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
}
}
}
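// cal_cuda is used for phases 1 and 2. When the block being processed lies in the
// pivot block-row (Round == b_i) or pivot block-column (Round == b_j), the needed
// pivot tile is the block's own tile, so CM or BM is aliased to the already-loaded
// tile `a` instead of being reloaded from global memory.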
__global__ void cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
int (*AM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int a[Share_Mem_Size][Share_Mem_Size];
// i-k block
int (*BM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int b[Share_Mem_Size][Share_Mem_Size];
// k-j block
int (*CM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int c[Share_Mem_Size][Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
// if(threadIdx.x == 0 && threadIdx.y == 0){
// printf("(%d %d) A(%d:%d, %d:%d) B(%d:%d, %d:%d) C(%d:%d, %d:%d) CAL(%d:%d, %d:%d, %d:%d)\n",
// blockDim.x, blockDim.y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, block_internal_start_y + threadIdx.y, block_internal_end_y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num,
// Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num, block_internal_start_y + threadIdx.y, block_internal_end_y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, block_internal_start_y + threadIdx.y, block_internal_end_y, Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num
// );
// }
AM = &a;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[i - block_internal_start_x][j - block_internal_start_y] = dist[i * vertex_num + j];
}
}
if(Round != b_i){
CM = &c;
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[k - Round * B][j - block_internal_start_y] = dist[k * vertex_num + j];
}
}
}else{CM = &a;}
if(Round != b_j){
BM = &b;
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[i - block_internal_start_x][k - Round * B] = dist[i * vertex_num + k];
}
}
}else{BM = &a;}
__syncthreads();
// Relax Path
// for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
// for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
// for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
// int d = (*BM)[i - block_internal_start_x][k - Round * B] + (*CM)[k - Round * B][j - block_internal_start_y];
// // __syncthreads();
// if (d < (*AM)[i - block_internal_start_x][j - block_internal_start_y]) {
// (*AM)[i - block_internal_start_x][j - block_internal_start_y] = d;
// // dist[i * vertex_num + j] = d;
// }
// }
// }
// __syncthreads();
// }
relax(AM, BM, CM, vertex_num, Round, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
// Move modified block to global memory
// for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
// for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
// dist[i * vertex_num + j] = (*AM)[i - block_internal_start_x][j - block_internal_start_y];
// }
// }
flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
}
}
}
void block_FW_cuda(int B) {
int round = (n + B - 1) / B;
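// Blocked Floyd-Warshall: round r first relaxes the pivot block (r,r) (phase 1),
// then the blocks in the pivot row and pivot column (phase 2), and finally runs
// phase 3 over the whole block grid so every block is relaxed through the pivot
// row/column results computed in this round.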
for (int r = 0; r < round; r++) {
// printf("Round: %d in total: %d\n", r, round);
fflush(stdout);
/* Phase 1*/
hipLaunchKernelGGL(( cal_cuda), dim3(1), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, r, r, 1, 1);
/* Phase 2*/
hipLaunchKernelGGL(( cal_cuda), dim3(r), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, r, 0, r, 1);
hipLaunchKernelGGL(( cal_cuda), dim3(round - r - 1), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, r, r + 1, round - r - 1, 1);
hipLaunchKernelGGL(( cal_cuda), dim3(r), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, 0, r, 1, r);
hipLaunchKernelGGL(( cal_cuda), dim3(round - r - 1), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, r + 1, r, 1, round - r - 1);
/* Phase 3*/
const dim3 grid_dim(round, round);
hipLaunchKernelGGL(( phase3_cal_cuda), dim3(grid_dim), dim3(block_dim), 0, 0, Dist_cuda, n, m, B, r, 0, 0, round, round);
}
}
int main(int argc, char* argv[]) {
input(argv[1]);
// show_mat(getDistAddr(0, 0, n), n);
setup_DistCuda(n);
printf("Vertice: %d, Edge: %d, B: %d\n", n, m, B);
block_FW_cuda(B);
back_DistCuda(n);
// show_mat(getDistAddr(0, 0, n), n);
output(argv[2]);
// show_mat(getDistAddr(0, 0, n), n);
return 0;
} | ecb353c126c2b8c3a4889385e13b85233d814782.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define SIZEOFINT sizeof(int)
const int INF = ((1 << 30) - 1);
const dim3 block_dim(4, 32);
const int B = 32;
const int Share_Mem_Size = 64;
int n, m;
int *Dist;
int *Dist_cuda;
void show_mat(int *start_p, int vertex_num){
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(start_p[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", start_p[i * vertex_num + j]);
}
}
printf("\n");
}
}
void malloc_Dist(int vertex_num){Dist = (int*)malloc(SIZEOFINT * vertex_num * vertex_num);}
int getDist(int i, int j, int vertex_num){return Dist[i * vertex_num + j];}
int *getDistAddr(int i, int j, int vertex_num){return &(Dist[i * vertex_num + j]);}
void setDist(int i, int j, int val, int vertex_num){Dist[i * vertex_num + j] = val;}
void setup_DistCuda(int vertex_num){
cudaMalloc((void **)&Dist_cuda, SIZEOFINT * vertex_num * vertex_num);
cudaMemcpy(Dist_cuda, Dist, (n * n * SIZEOFINT), cudaMemcpyHostToDevice);
}
void back_DistCuda(int vertex_num){
cudaMemcpy(Dist, Dist_cuda, (n * n * SIZEOFINT), cudaMemcpyDeviceToHost);
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
malloc_Dist(n);
// malloc_DistCuda(n);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
if (i == j) {
setDist(i, j, 0, n);
// Dist[i][j] = 0;
} else {
setDist(i, j, INF, n);
// Dist[i][j] = INF;
}
}
}
int pair[3];
for (int i = 0; i < m; i++) {
fread(pair, sizeof(int), 3, file);
setDist(pair[0], pair[1], pair[2], n);
// Dist[pair[0]][pair[1]] = pair[2];
}
// cudaMemcpy(Dist_cuda, Dist, (n * n * SIZEOFINT), cudaMemcpyHostToDevice);
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
// if (Dist[i][j] >= INF) Dist[i][j] = INF;
if (getDist(i, j, n) >= INF) setDist(i, j, INF, n);
}
// fwrite(Dist[i], sizeof(int), n, outfile);
// fwrite(getDistAddr(i, 0, n), sizeof(int), n, outfile);
}
fwrite(getDistAddr(0, 0, n), sizeof(int), n * n, outfile);
fclose(outfile);
}
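// relax: for every k in the current pivot block, update the cached tile with
// AM[i][j] = min(AM[i][j], BM[i][k] + CM[k][j]); this is the Floyd-Warshall
// relaxation step performed entirely in shared memory.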
__device__ void relax(int (*AM)[Share_Mem_Size][Share_Mem_Size], int (*BM)[Share_Mem_Size][Share_Mem_Size], int (*CM)[Share_Mem_Size][Share_Mem_Size], int vertex_num, int Round, int block_internal_start_x, int block_internal_end_x, int block_internal_start_y, int block_internal_end_y){
// Relax Path
for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
int d = (*BM)[i - block_internal_start_x][k - Round * B] + (*CM)[k - Round * B][j - block_internal_start_y];
// __syncthreads();
if (d < (*AM)[i - block_internal_start_x][j - block_internal_start_y]) {
(*AM)[i - block_internal_start_x][j - block_internal_start_y] = d;
// dist[i * vertex_num + j] = d;
}
}
}
__syncthreads();
}
}
__device__ void flush(int *dist, int (*AM)[Share_Mem_Size][Share_Mem_Size], int vertex_num, int block_internal_start_x, int block_internal_end_x, int block_internal_start_y, int block_internal_end_y){
// Move modified block to global memory
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
dist[i * vertex_num + j] = (*AM)[i - block_internal_start_x][j - block_internal_start_y];
}
}
}
__global__ void phase3_cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
const int Share_Mem_Row_Size = B;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
__shared__ int a[Share_Mem_Size * Share_Mem_Size];
// i-k block
__shared__ int b[Share_Mem_Size * Share_Mem_Size];
// k-j block
__shared__ int c[Share_Mem_Size * Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)] = dist[i * vertex_num + j];
}
}
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[(k - Round * B) * Share_Mem_Row_Size + (j - block_internal_start_y)] = dist[k * vertex_num + j];
}
}
// Reverse the row and column to ensure column-major iteration
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[(k - Round * B) * Share_Mem_Row_Size + (i - block_internal_start_x)] = dist[i * vertex_num + k];
}
}
__syncthreads();
// Relax Path
for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
int d = b[(k - Round * B) * Share_Mem_Row_Size + (i - block_internal_start_x)] + c[(k - Round * B) * Share_Mem_Row_Size + (j - block_internal_start_y)];
if (d < a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)]) {
a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)] = d;
}
}
}
__syncthreads();
}
// Move modified block to global memory
// flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
dist[i * vertex_num + j] = a[(i - block_internal_start_x) * Share_Mem_Row_Size + (j - block_internal_start_y)];
}
}
}
}
}
__global__ void phase2_cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
// const int Share_Mem_Size = 64;
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
int (*AM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int a[Share_Mem_Size][Share_Mem_Size];
// i-k block
int (*BM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int b[Share_Mem_Size][Share_Mem_Size];
// k-j block
int (*CM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int c[Share_Mem_Size][Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
AM = &a;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[i - block_internal_start_x][j - block_internal_start_y] = dist[i * vertex_num + j];
}
}
CM = &c;
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[k - Round * B][j - block_internal_start_y] = dist[k * vertex_num + j];
}
}
BM = &b;
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[i - block_internal_start_x][k - Round * B] = dist[i * vertex_num + k];
}
}
__syncthreads();
// Relax Path
relax(AM, BM, CM, vertex_num, Round, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
// Move modified block to global memory
flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
}
}
}
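// cal_cuda is used for phases 1 and 2. When the block being processed lies in the
// pivot block-row (Round == b_i) or pivot block-column (Round == b_j), the needed
// pivot tile is the block's own tile, so CM or BM is aliased to the already-loaded
// tile `a` instead of being reloaded from global memory.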
__global__ void cal_cuda(int *dist, int vertex_num, int edge_num, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) {
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// printf("%d\n", dist[1]);
// i-j block
int (*AM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int a[Share_Mem_Size][Share_Mem_Size];
// i-k block
int (*BM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int b[Share_Mem_Size][Share_Mem_Size];
// k-j block
int (*CM)[Share_Mem_Size][Share_Mem_Size];
__shared__ int c[Share_Mem_Size][Share_Mem_Size];
for (int b_i = block_start_x + blockIdx.x; b_i < block_end_x; b_i+=gridDim.x) {
for (int b_j = block_start_y + blockIdx.y; b_j < block_end_y; b_j+=gridDim.y) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it needs to compute B times
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int block_internal_start_x = b_i * B;
int block_internal_end_x = (b_i + 1) * B;
int block_internal_start_y = b_j * B;
int block_internal_end_y = (b_j + 1) * B;
if (block_internal_end_x > vertex_num) block_internal_end_x = vertex_num;
if (block_internal_end_y > vertex_num) block_internal_end_y = vertex_num;
// if(threadIdx.x == 0 && threadIdx.y == 0){
// printf("(%d %d) A(%d:%d, %d:%d) B(%d:%d, %d:%d) C(%d:%d, %d:%d) CAL(%d:%d, %d:%d, %d:%d)\n",
// blockDim.x, blockDim.y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, block_internal_start_y + threadIdx.y, block_internal_end_y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num,
// Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num, block_internal_start_y + threadIdx.y, block_internal_end_y,
// block_internal_start_x + threadIdx.x, block_internal_end_x, block_internal_start_y + threadIdx.y, block_internal_end_y, Round * B, (Round + 1) * B < vertex_num? (Round + 1) * B : vertex_num
// );
// }
AM = &a;
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
a[i - block_internal_start_x][j - block_internal_start_y] = dist[i * vertex_num + j];
}
}
if(Round != b_i){
CM = &c;
for (int k = Round * B + threadIdx.x; k < (Round + 1) * B && k < vertex_num; k+=blockDim.x) {
for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
c[k - Round * B][j - block_internal_start_y] = dist[k * vertex_num + j];
}
}
}else{CM = &a;}
if(Round != b_j){
BM = &b;
for (int k = Round * B + threadIdx.y; k < (Round + 1) * B && k < vertex_num; k+=blockDim.y) {
for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
b[i - block_internal_start_x][k - Round * B] = dist[i * vertex_num + k];
}
}
}else{BM = &a;}
__syncthreads();
// Relax Path
// for (int k = Round * B; k < (Round + 1) * B && k < vertex_num; k++) {
// for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
// for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
// int d = (*BM)[i - block_internal_start_x][k - Round * B] + (*CM)[k - Round * B][j - block_internal_start_y];
// // __syncthreads();
// if (d < (*AM)[i - block_internal_start_x][j - block_internal_start_y]) {
// (*AM)[i - block_internal_start_x][j - block_internal_start_y] = d;
// // dist[i * vertex_num + j] = d;
// }
// }
// }
// __syncthreads();
// }
relax(AM, BM, CM, vertex_num, Round, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
// Move modified block to global memory
// for (int i = block_internal_start_x + threadIdx.x; i < block_internal_end_x; i+=blockDim.x) {
// for (int j = block_internal_start_y + threadIdx.y; j < block_internal_end_y; j+=blockDim.y) {
// dist[i * vertex_num + j] = (*AM)[i - block_internal_start_x][j - block_internal_start_y];
// }
// }
flush(dist, AM, vertex_num, block_internal_start_x, block_internal_end_x, block_internal_start_y, block_internal_end_y);
}
}
}
void block_FW_cuda(int B) {
int round = (n + B - 1) / B;
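// Blocked Floyd-Warshall: round r first relaxes the pivot block (r,r) (phase 1),
// then the blocks in the pivot row and pivot column (phase 2), and finally runs
// phase 3 over the whole block grid so every block is relaxed through the pivot
// row/column results computed in this round.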
for (int r = 0; r < round; r++) {
// printf("Round: %d in total: %d\n", r, round);
fflush(stdout);
/* Phase 1*/
cal_cuda<<<1, block_dim>>>(Dist_cuda, n, m, B, r, r, r, 1, 1);
/* Phase 2*/
cal_cuda<<<r, block_dim>>>(Dist_cuda, n, m, B, r, r, 0, r, 1);
cal_cuda<<<round - r - 1, block_dim>>>(Dist_cuda, n, m, B, r, r, r + 1, round - r - 1, 1);
cal_cuda<<<r, block_dim>>>(Dist_cuda, n, m, B, r, 0, r, 1, r);
cal_cuda<<<round - r - 1, block_dim>>>(Dist_cuda, n, m, B, r, r + 1, r, 1, round - r - 1);
/* Phase 3*/
const dim3 grid_dim(round, round);
phase3_cal_cuda<<<grid_dim, block_dim>>>(Dist_cuda, n, m, B, r, 0, 0, round, round);
}
}
int main(int argc, char* argv[]) {
input(argv[1]);
// show_mat(getDistAddr(0, 0, n), n);
setup_DistCuda(n);
printf("Vertice: %d, Edge: %d, B: %d\n", n, m, B);
block_FW_cuda(B);
back_DistCuda(n);
// show_mat(getDistAddr(0, 0, n), n);
output(argv[2]);
// show_mat(getDistAddr(0, 0, n), n);
return 0;
} |
02489c6e0497aa542c08a81f4c6051b72e1eb8ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/collective/c_embedding_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
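// Forward lookup for a sharded (vocabulary-parallel) embedding: this rank stores only
// rows [start_idx, end_idx) of the full table. Ids inside the local range are gathered
// from the local shard; all other ids produce zeros, so the per-rank partial outputs
// can be combined across ranks outside this kernel.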
template <typename T, typename IndexT>
__global__ void CEmbedding(T *out, const T *table, const IndexT *ids,
const int rows, const int columns, const int64_t N,
const int64_t start_idx, const int64_t end_idx,
const int64_t limit) {
CUDA_KERNEL_LOOP(i, limit) {
size_t row = i / columns;
size_t col = i % columns;
auto id = ids[row];
if (id >= start_idx && id < end_idx) {
auto real_idx = id - start_idx;
PADDLE_ENFORCE(real_idx < N,
"The index is out of bounds, "
"please check whether the dimensions of index and "
"input meet the requirements. It should "
"be less than [%d], but received [%d]",
N, real_idx);
out[i] = table[real_idx * columns + col];
} else {
out[i] = static_cast<T>(0);
}
}
}
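// Backward pass: scatter the output gradient back into the local table shard. Only ids
// owned by this rank ([start_idx, end_idx)) contribute, and updates use atomic adds
// because multiple ids may map to the same table row.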
template <typename T, typename IndexT>
__global__ void CEmbeddingGrad(T *table, const T *output, const IndexT *ids,
const int rows, const int columns,
const int64_t N, const int64_t start_idx,
const int64_t end_idx, const int64_t limit) {
CUDA_KERNEL_LOOP(i, limit) {
size_t row = i / columns;
size_t col = i % columns;
auto id = ids[row];
if (id >= start_idx && id < end_idx) {
auto real_idx = id - start_idx;
paddle::platform::CudaAtomicAdd(&table[real_idx * columns + col],
output[i]);
}
}
}
template <typename T>
class CEmbeddingCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
const auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const int64_t start_idx = context.Attr<int64_t>("start_index");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
const int64_t end_idx = start_idx + N;
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
auto limit = K * D;
int blocks = NumBlocks(limit);
int threads = kNumCUDAThreads;
const auto &index_type = ids_t->type();
if (index_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( CEmbedding<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output, table, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx,
limit);
} else if (index_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( CEmbedding<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
output, table, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx,
limit);
}
}
};
template <typename T>
class CEmbeddingGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const int64_t start_idx = context.Attr<int64_t>("start_index");
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t end_idx = start_idx + N;
auto limit = K * D;
int blocks = NumBlocks(limit);
int threads = kNumCUDAThreads;
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const auto &index_type = ids_t->type();
if (index_type == framework::proto::VarType::INT32) {
hipLaunchKernelGGL(( CEmbeddingGrad<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids_t->data<int32_t>(), K, D, N, start_idx,
end_idx, limit);
} else if (index_type == framework::proto::VarType::INT64) {
hipLaunchKernelGGL(( CEmbeddingGrad<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids_t->data<int64_t>(), K, D, N, start_idx,
end_idx, limit);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(c_embedding, ops::CEmbeddingCUDAKernel<float>,
ops::CEmbeddingCUDAKernel<double>,
ops::CEmbeddingCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(c_embedding_grad, ops::CEmbeddingGradCUDAKernel<float>,
ops::CEmbeddingGradCUDAKernel<double>,
ops::CEmbeddingGradCUDAKernel<plat::float16>);
| 02489c6e0497aa542c08a81f4c6051b72e1eb8ec.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/collective/c_embedding_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
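// Editor's note (hedged sketch, not part of the original source): CUDA_KERNEL_LOOP,
// used by the kernels below, is assumed to be Paddle's usual grid-stride loop macro,
// roughly
//   #define CUDA_KERNEL_LOOP(i, n)
//     for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
// so NumBlocks() only caps the grid at kNumMaxinumNumBlocks; elements beyond
// blocks * threads are still covered by the stride term rather than by extra blocks.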
template <typename T, typename IndexT>
__global__ void CEmbedding(T *out, const T *table, const IndexT *ids,
const int rows, const int columns, const int64_t N,
const int64_t start_idx, const int64_t end_idx,
const int64_t limit) {
CUDA_KERNEL_LOOP(i, limit) {
size_t row = i / columns;
size_t col = i % columns;
auto id = ids[row];
if (id >= start_idx && id < end_idx) {
auto real_idx = id - start_idx;
PADDLE_ENFORCE(real_idx < N,
"The index is out of bounds, "
"please check whether the dimensions of index and "
"input meet the requirements. It should "
"be less than [%d], but received [%d]",
N, real_idx);
out[i] = table[real_idx * columns + col];
} else {
out[i] = static_cast<T>(0);
}
}
}
template <typename T, typename IndexT>
__global__ void CEmbeddingGrad(T *table, const T *output, const IndexT *ids,
const int rows, const int columns,
const int64_t N, const int64_t start_idx,
const int64_t end_idx, const int64_t limit) {
CUDA_KERNEL_LOOP(i, limit) {
size_t row = i / columns;
size_t col = i % columns;
auto id = ids[row];
if (id >= start_idx && id < end_idx) {
auto real_idx = id - start_idx;
paddle::platform::CudaAtomicAdd(&table[real_idx * columns + col],
output[i]);
}
}
}
template <typename T>
class CEmbeddingCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
auto *table_t = context.Input<LoDTensor>("W");
auto *ids_t = context.Input<LoDTensor>("Ids");
auto *output_t = context.Output<LoDTensor>("Out");
const auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const int64_t start_idx = context.Attr<int64_t>("start_index");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t->numel();
const int64_t end_idx = start_idx + N;
auto *table = table_t->data<T>();
auto *output = output_t->mutable_data<T>(context.GetPlace());
auto limit = K * D;
int blocks = NumBlocks(limit);
int threads = kNumCUDAThreads;
const auto &index_type = ids_t->type();
if (index_type == framework::proto::VarType::INT32) {
CEmbedding<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
output, table, ids_t->data<int32_t>(), K, D, N, start_idx, end_idx,
limit);
} else if (index_type == framework::proto::VarType::INT64) {
CEmbedding<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
output, table, ids_t->data<int64_t>(), K, D, N, start_idx, end_idx,
limit);
}
}
};
template <typename T>
class CEmbeddingGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto &dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const int64_t start_idx = context.Attr<int64_t>("start_index");
auto ids_t = context.Input<LoDTensor>("Ids");
auto d_output_t = context.Input<LoDTensor>(framework::GradVarName("Out"));
auto d_table_t = context.Output<LoDTensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t->numel();
const int64_t end_idx = start_idx + N;
auto limit = K * D;
int blocks = NumBlocks(limit);
int threads = kNumCUDAThreads;
const T *d_output = d_output_t->data<T>();
T *d_table = d_table_t->mutable_data<T>(context.GetPlace());
auto t = framework::EigenVector<T>::Flatten(*d_table_t);
t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0));
const auto &index_type = ids_t->type();
if (index_type == framework::proto::VarType::INT32) {
CEmbeddingGrad<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids_t->data<int32_t>(), K, D, N, start_idx,
end_idx, limit);
} else if (index_type == framework::proto::VarType::INT64) {
CEmbeddingGrad<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids_t->data<int64_t>(), K, D, N, start_idx,
end_idx, limit);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(c_embedding, ops::CEmbeddingCUDAKernel<float>,
ops::CEmbeddingCUDAKernel<double>,
ops::CEmbeddingCUDAKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(c_embedding_grad, ops::CEmbeddingGradCUDAKernel<float>,
ops::CEmbeddingGradCUDAKernel<double>,
ops::CEmbeddingGradCUDAKernel<plat::float16>);
|
5d4e438085f52719f708d53bbb4effd8c7b71f03.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacobi_iteration.h"
__device__ void lock(int *mutex) {
while(atomicCAS(mutex, 0, 1) != 0);
return;
}
__device__ void unlock(int *mutex) {
atomicExch(mutex, 0);
return;
}
__global__ void jacobi_iteration_kernel_naive(float *A, float *B, unsigned int num_cols, unsigned int num_rows, float *x, double *ssd, int *mutex) {
__shared__ double s_ssd[THREAD_BLOCK_1D_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float old = x[row];
double sum = -A[row * num_cols + row] * x[row];
for(int j = 0; j < num_cols; j++) {
sum += A[row * num_cols + j] * x[j];
}
__syncthreads();
x[row] = (B[row] - sum) / A[row * num_cols + row];
double val_diff = x[row] - old;
s_ssd[threadIdx.x] = val_diff * val_diff;
__syncthreads();
for(int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadIdx.x < stride) {
s_ssd[threadIdx.x] += s_ssd[threadIdx.x + stride];
}
__syncthreads();
}
if(threadIdx.x == 0) {
lock(mutex);
*ssd += s_ssd[0];
unlock(mutex);
}
return;
}
__global__ void jacobi_iteration_kernel_optimized(float *A, float *B, unsigned int num_cols, unsigned int num_rows, float *x, double *ssd, int *mutex)
{
__shared__ double s_ssd[THREAD_BLOCK_1D_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float diag_val = A[row * num_cols + row];
double sum = -diag_val * x[row];
for(int j = 0; j < num_cols; j++) {
sum += A[j * num_cols + row] * x[j];
}
float new_val = (B[row] - sum) / diag_val;
double val_diff = new_val - x[row];
s_ssd[threadIdx.x] = val_diff * val_diff;
__syncthreads();
for(int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadIdx.x < stride) {
s_ssd[threadIdx.x] += s_ssd[threadIdx.x + stride];
}
__syncthreads();
}
if(threadIdx.x == 0) {
lock(mutex);
*ssd += s_ssd[0];
unlock(mutex);
}
x[row] = new_val;
return;
}
__global__ void row_to_col_major_kernel(float *A, unsigned int num_cols, unsigned int num_rows, float *col_major_A) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
col_major_A[col * num_rows + row] = A[row * num_cols + col];
}
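/* Editor's sketch (assumption, not part of the original file): a host-side driver for the
   kernels above. Every name below (d_A_col_major, d_B, d_x, d_ssd, d_mutex, ssd_tolerance)
   is hypothetical, the grid assumes num_rows is a multiple of THREAD_BLOCK_1D_SIZE, and the
   tolerance is compared against the accumulated sum of squared differences written to *ssd. */
static void jacobi_solve_sketch(float *d_A_col_major, float *d_B,
                                unsigned int num_cols, unsigned int num_rows,
                                float *d_x, double *d_ssd, int *d_mutex,
                                double ssd_tolerance)
{
    double ssd_host = 0.0;
    do {
        hipMemset(d_ssd, 0, sizeof(double)); /* reset the accumulator before each sweep */
        hipLaunchKernelGGL((jacobi_iteration_kernel_optimized),
                           dim3(num_rows / THREAD_BLOCK_1D_SIZE), dim3(THREAD_BLOCK_1D_SIZE), 0, 0,
                           d_A_col_major, d_B, num_cols, num_rows, d_x, d_ssd, d_mutex);
        hipMemcpy(&ssd_host, d_ssd, sizeof(double), hipMemcpyDeviceToHost); /* implicit sync */
    } while (ssd_host > ssd_tolerance);
}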
| 5d4e438085f52719f708d53bbb4effd8c7b71f03.cu | #include "jacobi_iteration.h"
__device__ void lock(int *mutex) {
while(atomicCAS(mutex, 0, 1) != 0);
return;
}
__device__ void unlock(int *mutex) {
atomicExch(mutex, 0);
return;
}
__global__ void jacobi_iteration_kernel_naive(float *A, float *B, unsigned int num_cols, unsigned int num_rows, float *x, double *ssd, int *mutex) {
__shared__ double s_ssd[THREAD_BLOCK_1D_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float old = x[row];
double sum = -A[row * num_cols + row] * x[row];
for(int j = 0; j < num_cols; j++) {
sum += A[row * num_cols + j] * x[j];
}
__syncthreads();
x[row] = (B[row] - sum) / A[row * num_cols + row];
double val_diff = x[row] - old;
s_ssd[threadIdx.x] = val_diff * val_diff;
__syncthreads();
for(int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadIdx.x < stride) {
s_ssd[threadIdx.x] += s_ssd[threadIdx.x + stride];
}
__syncthreads();
}
if(threadIdx.x == 0) {
lock(mutex);
*ssd += s_ssd[0];
unlock(mutex);
}
return;
}
__global__ void jacobi_iteration_kernel_optimized(float *A, float *B, unsigned int num_cols, unsigned int num_rows, float *x, double *ssd, int *mutex)
{
__shared__ double s_ssd[THREAD_BLOCK_1D_SIZE];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float diag_val = A[row * num_cols + row];
double sum = -diag_val * x[row];
for(int j = 0; j < num_cols; j++) {
sum += A[j * num_cols + row] * x[j];
}
float new_val = (B[row] - sum) / diag_val;
double val_diff = new_val - x[row];
s_ssd[threadIdx.x] = val_diff * val_diff;
__syncthreads();
for(int stride = blockDim.x >> 1; stride > 0; stride = stride >> 1) {
if(threadIdx.x < stride) {
s_ssd[threadIdx.x] += s_ssd[threadIdx.x + stride];
}
__syncthreads();
}
if(threadIdx.x == 0) {
lock(mutex);
*ssd += s_ssd[0];
unlock(mutex);
}
x[row] = new_val;
return;
}
__global__ void row_to_col_major_kernel(float *A, unsigned int num_cols, unsigned int num_rows, float *col_major_A) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
col_major_A[col * num_rows + row] = A[row * num_cols + col];
}
|
8f3611f62295faf0f1fde49367fcd07580c3cfb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "voxelize.cuh"
// CUDA Global Memory variables
//__device__ size_t voxel_count = 0; // How many voxels did we count
//__device__ size_t triangles_seen_count = 0; // Sanity check
__constant__ uint32_t morton256_x[256];
__constant__ uint32_t morton256_y[256];
__constant__ uint32_t morton256_z[256];
// Encode morton code using LUT table
__device__ inline uint64_t mortonEncode_LUT(unsigned int x, unsigned int y, unsigned int z){
uint64_t answer = 0;
answer = morton256_z[(z >> 16) & 0xFF] |
morton256_y[(y >> 16) & 0xFF] |
morton256_x[(x >> 16) & 0xFF];
answer = answer << 48 |
morton256_z[(z >> 8) & 0xFF] |
morton256_y[(y >> 8) & 0xFF] |
morton256_x[(x >> 8) & 0xFF];
answer = answer << 24 |
morton256_z[(z)& 0xFF] |
morton256_y[(y)& 0xFF] |
morton256_x[(x)& 0xFF];
return answer;
}
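// Editor's sketch (assumption, not part of the original source): the host tables
// host_morton256_x/y/z that voxelize() copies into the __constant__ arrays above are
// expected to hold each byte value with two zero bits inserted between consecutive bits;
// a hypothetical generator (buildMorton256LUTSketch is not a name from this project) is:
static inline void buildMorton256LUTSketch(uint32_t x_lut[256], uint32_t y_lut[256], uint32_t z_lut[256]){
	for (uint32_t i = 0; i < 256; i++){
		uint32_t spread = 0;
		for (int b = 0; b < 8; b++){ spread |= ((i >> b) & 1u) << (3 * b); } // bits land at positions 0, 3, 6, ...
		x_lut[i] = spread;      // x pattern occupies bit positions 0, 3, 6, ...
		y_lut[i] = spread << 1; // y pattern is the same, shifted left by one
		z_lut[i] = spread << 2; // z pattern shifted left by two
	}
}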
// Possible optimization: buffer bitsets (for now: Disabled because too much overhead)
//struct bufferedBitSetter{
// unsigned int* voxel_table;
// size_t current_int_location;
// unsigned int current_mask;
//
// __device__ __inline__ bufferedBitSetter(unsigned int* voxel_table, size_t index) :
// voxel_table(voxel_table), current_mask(0) {
// current_int_location = int(index / 32.0f);
// }
//
// __device__ __inline__ void setBit(size_t index){
// size_t new_int_location = int(index / 32.0f);
// if (current_int_location != new_int_location){
// flush();
// current_int_location = new_int_location;
// }
// unsigned int bit_pos = 31 - (unsigned int)(int(index) % 32);
// current_mask = current_mask | (1 << bit_pos);
// }
//
// __device__ __inline__ void flush(){
// if (current_mask != 0){
// atomicOr(&(voxel_table[current_int_location]), current_mask);
// }
// }
//};
// Possible optimization: check bit before you set it - don't need to do atomic operation if it's already set to 1
// For now: overhead, so it seems
//__device__ __inline__ bool checkBit(unsigned int* voxel_table, size_t index){
// size_t int_location = index / size_t(32);
// unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
// return ((voxel_table[int_location]) & (1 << bit_pos));
//}
// Set a bit in the giant voxel table. This involves doing an atomic operation on a 32-bit word in memory.
// Blocking other threads writing to it for a very short time
__device__ __inline__ void setBit(unsigned int* voxel_table, size_t index){
size_t int_location = index / size_t(32);
unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
unsigned int mask = 1 << bit_pos;
atomicOr(&(voxel_table[int_location]), mask);
}
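// Editor's sketch (assumption): a host-side mirror of setBit() for testing a single voxel
// after the packed table has been copied back; the name checkVoxelHost is hypothetical.
static inline bool checkVoxelHost(const unsigned int* voxel_table, size_t index){
	size_t int_location = index / size_t(32);
	unsigned int bit_pos = size_t(31) - (index % size_t(32)); // same RtL bit convention as setBit()
	return ((voxel_table[int_location] >> bit_pos) & 1u) != 0u;
}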
// Main triangle voxelization method
__global__ void voxelize_triangle(voxinfo info, float* triangle_data, unsigned int* voxel_table, bool morton_order){
size_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
size_t stride = blockDim.x * gridDim.x;
// Common variables used in the voxelization process
glm::vec3 delta_p(info.unit.x, info.unit.y, info.unit.z);
glm::vec3 c(0.0f, 0.0f, 0.0f); // critical point
glm::vec3 grid_max(info.gridsize.x - 1, info.gridsize.y - 1, info.gridsize.z - 1); // grid max (grid runs from 0 to gridsize-1)
while (thread_id < info.n_triangles){ // every thread works on specific triangles in its stride
size_t t = thread_id * 9; // triangle contains 9 vertices
// COMPUTE COMMON TRIANGLE PROPERTIES
// Move vertices to origin using bbox
glm::vec3 v0 = glm::vec3(triangle_data[t], triangle_data[t + 1], triangle_data[t + 2]) - info.bbox.min;
glm::vec3 v1 = glm::vec3(triangle_data[t + 3], triangle_data[t + 4], triangle_data[t + 5]) - info.bbox.min;
glm::vec3 v2 = glm::vec3(triangle_data[t + 6], triangle_data[t + 7], triangle_data[t + 8]) - info.bbox.min;
// Edge vectors
glm::vec3 e0 = v1 - v0;
glm::vec3 e1 = v2 - v1;
glm::vec3 e2 = v0 - v2;
// Normal vector pointing up from the triangle
glm::vec3 n = glm::normalize(glm::cross(e0, e1));
// COMPUTE TRIANGLE BBOX IN GRID
// Triangle bounding box in world coordinates is min(v0,v1,v2) and max(v0,v1,v2)
AABox<glm::vec3> t_bbox_world(glm::min(v0, glm::min(v1, v2)), glm::max(v0, glm::max(v1, v2)));
// Triangle bounding box in voxel grid coordinates is the world bounding box divided by the grid unit vector
AABox<glm::ivec3> t_bbox_grid;
t_bbox_grid.min = glm::clamp(t_bbox_world.min / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);
t_bbox_grid.max = glm::clamp(t_bbox_world.max / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);
// PREPARE PLANE TEST PROPERTIES
if (n.x > 0.0f) { c.x = info.unit.x; }
if (n.y > 0.0f) { c.y = info.unit.y; }
if (n.z > 0.0f) { c.z = info.unit.z; }
float d1 = glm::dot(n, (c - v0));
float d2 = glm::dot(n, ((delta_p - c) - v0));
// PREPARE PROJECTION TEST PROPERTIES
// XY plane
glm::vec2 n_xy_e0(-1.0f*e0.y, e0.x);
glm::vec2 n_xy_e1(-1.0f*e1.y, e1.x);
glm::vec2 n_xy_e2(-1.0f*e2.y, e2.x);
if (n.z < 0.0f) {
n_xy_e0 = -n_xy_e0;
n_xy_e1 = -n_xy_e1;
n_xy_e2 = -n_xy_e2;
}
float d_xy_e0 = (-1.0f * glm::dot(n_xy_e0, glm::vec2(v0.x, v0.y))) + glm::max(0.0f, info.unit.x*n_xy_e0[0]) + glm::max(0.0f, info.unit.y*n_xy_e0[1]);
float d_xy_e1 = (-1.0f * glm::dot(n_xy_e1, glm::vec2(v1.x, v1.y))) + glm::max(0.0f, info.unit.x*n_xy_e1[0]) + glm::max(0.0f, info.unit.y*n_xy_e1[1]);
float d_xy_e2 = (-1.0f * glm::dot(n_xy_e2, glm::vec2(v2.x, v2.y))) + glm::max(0.0f, info.unit.x*n_xy_e2[0]) + glm::max(0.0f, info.unit.y*n_xy_e2[1]);
// YZ plane
glm::vec2 n_yz_e0(-1.0f*e0.z, e0.y);
glm::vec2 n_yz_e1(-1.0f*e1.z, e1.y);
glm::vec2 n_yz_e2(-1.0f*e2.z, e2.y);
if (n.x < 0.0f) {
n_yz_e0 = -n_yz_e0;
n_yz_e1 = -n_yz_e1;
n_yz_e2 = -n_yz_e2;
}
float d_yz_e0 = (-1.0f * glm::dot(n_yz_e0, glm::vec2(v0.y, v0.z))) + glm::max(0.0f, info.unit.y*n_yz_e0[0]) + glm::max(0.0f, info.unit.z*n_yz_e0[1]);
float d_yz_e1 = (-1.0f * glm::dot(n_yz_e1, glm::vec2(v1.y, v1.z))) + glm::max(0.0f, info.unit.y*n_yz_e1[0]) + glm::max(0.0f, info.unit.z*n_yz_e1[1]);
float d_yz_e2 = (-1.0f * glm::dot(n_yz_e2, glm::vec2(v2.y, v2.z))) + glm::max(0.0f, info.unit.y*n_yz_e2[0]) + glm::max(0.0f, info.unit.z*n_yz_e2[1]);
// ZX plane
glm::vec2 n_zx_e0(-1.0f*e0.x, e0.z);
glm::vec2 n_zx_e1(-1.0f*e1.x, e1.z);
glm::vec2 n_zx_e2(-1.0f*e2.x, e2.z);
if (n.y < 0.0f) {
n_zx_e0 = -n_zx_e0;
n_zx_e1 = -n_zx_e1;
n_zx_e2 = -n_zx_e2;
}
float d_xz_e0 = (-1.0f * glm::dot(n_zx_e0, glm::vec2(v0.z, v0.x))) + glm::max(0.0f, info.unit.x*n_zx_e0[0]) + glm::max(0.0f, info.unit.z*n_zx_e0[1]);
float d_xz_e1 = (-1.0f * glm::dot(n_zx_e1, glm::vec2(v1.z, v1.x))) + glm::max(0.0f, info.unit.x*n_zx_e1[0]) + glm::max(0.0f, info.unit.z*n_zx_e1[1]);
float d_xz_e2 = (-1.0f * glm::dot(n_zx_e2, glm::vec2(v2.z, v2.x))) + glm::max(0.0f, info.unit.x*n_zx_e2[0]) + glm::max(0.0f, info.unit.z*n_zx_e2[1]);
// test possible grid boxes for overlap
for (int z = t_bbox_grid.min.z; z <= t_bbox_grid.max.z; z++){
for (int y = t_bbox_grid.min.y; y <= t_bbox_grid.max.y; y++){
for (int x = t_bbox_grid.min.x; x <= t_bbox_grid.max.x; x++){
// size_t location = x + (y*info.gridsize) + (z*info.gridsize*info.gridsize);
// if (checkBit(voxel_table, location)){ continue; }
// TRIANGLE PLANE THROUGH BOX TEST
glm::vec3 p(x*info.unit.x, y*info.unit.y, z*info.unit.z);
float nDOTp = glm::dot(n, p);
if ((nDOTp + d1) * (nDOTp + d2) > 0.0f){ continue; }
// PROJECTION TESTS
// XY
glm::vec2 p_xy(p.x, p.y);
if ((glm::dot(n_xy_e0, p_xy) + d_xy_e0) < 0.0f){ continue; }
if ((glm::dot(n_xy_e1, p_xy) + d_xy_e1) < 0.0f){ continue; }
if ((glm::dot(n_xy_e2, p_xy) + d_xy_e2) < 0.0f){ continue; }
// YZ
glm::vec2 p_yz(p.y, p.z);
if ((glm::dot(n_yz_e0, p_yz) + d_yz_e0) < 0.0f){ continue; }
if ((glm::dot(n_yz_e1, p_yz) + d_yz_e1) < 0.0f){ continue; }
if ((glm::dot(n_yz_e2, p_yz) + d_yz_e2) < 0.0f){ continue; }
// XZ
glm::vec2 p_zx(p.z, p.x);
if ((glm::dot(n_zx_e0, p_zx) + d_xz_e0) < 0.0f){ continue; }
if ((glm::dot(n_zx_e1, p_zx) + d_xz_e1) < 0.0f){ continue; }
if ((glm::dot(n_zx_e2, p_zx) + d_xz_e2) < 0.0f){ continue; }
//atomicAdd(&voxel_count, 1);
if (morton_order){
size_t location = mortonEncode_LUT(x, y, z);
setBit(voxel_table, location);
} else {
size_t location = x + (y*info.gridsize.y) + (z*info.gridsize.y*info.gridsize.z);
setBit(voxel_table, location);
}
continue;
}
}
}
// sanity check: atomically count triangles
//atomicAdd(&triangles_seen_count, 1);
thread_id += stride;
}
}
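// Editor's note (restating the tests above, not part of the original source): a grid cell
// with minimum corner p overlaps the triangle's plane iff (n.p + d1) * (n.p + d2) <= 0,
// where d1 and d2 come from the critical point c and its mirror (delta_p - c); the three
// 2D edge-function tests then reject cells that fall outside the triangle's xy, yz and zx
// projections. The scheme appears to follow the conservative triangle/box overlap test of
// Schwarz & Seidel (2010).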
void voxelize(const voxinfo& v, float* triangle_data, unsigned int* vtable, bool useThrustPath, bool morton_code) {
float elapsedTime;
// These are only used when we're not using UNIFIED memory
unsigned int* dev_vtable; // DEVICE pointer to voxel_data
size_t vtable_size; // vtable size
// Create timers, set start time
hipEvent_t start_vox, stop_vox;
checkCudaErrors(hipEventCreate(&start_vox));
checkCudaErrors(hipEventCreate(&stop_vox));
// Copy morton LUT if we're encoding to morton
if (morton_code){
checkCudaErrors(hipMemcpyToSymbol(morton256_x, host_morton256_x, 256 * sizeof(uint32_t)));
checkCudaErrors(hipMemcpyToSymbol(morton256_y, host_morton256_y, 256 * sizeof(uint32_t)));
checkCudaErrors(hipMemcpyToSymbol(morton256_z, host_morton256_z, 256 * sizeof(uint32_t)));
}
// Estimate best block and grid size using CUDA Occupancy Calculator
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxelize_triangle, 0, 0);
// Round up according to array size
gridSize = (v.n_triangles + blockSize - 1) / blockSize;
if (useThrustPath) { // We're not using UNIFIED memory
vtable_size = ((size_t)v.gridsize.x * v.gridsize.y * v.gridsize.z) / (size_t) 8.0;
fprintf(stdout, "[Voxel Grid] Allocating %llu kB of DEVICE memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(hipMalloc(&dev_vtable, vtable_size));
checkCudaErrors(hipMemset(dev_vtable, 0, vtable_size));
// Start voxelization
checkCudaErrors(hipEventRecord(start_vox, 0));
voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, dev_vtable, morton_code);
}
else { // UNIFIED MEMORY
checkCudaErrors(hipEventRecord(start_vox, 0));
voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, vtable, morton_code);
}
hipDeviceSynchronize();
checkCudaErrors(hipEventRecord(stop_vox, 0));
checkCudaErrors(hipEventSynchronize(stop_vox));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start_vox, stop_vox));
printf("[Voxelization] GPU time: %3.1f ms\n", elapsedTime);
// If we're not using UNIFIED memory, copy the voxel table back and free all
if (useThrustPath){
fprintf(stdout, "[Voxel Grid] Copying %llu kB to page-locked HOST memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(hipMemcpy((void*)vtable, dev_vtable, vtable_size, hipMemcpyDefault));
fprintf(stdout, "[Voxel Grid] Freeing %llu kB of DEVICE memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(hipFree(dev_vtable));
}
// SANITY CHECKS
//size_t t_seen, v_count;
//HANDLE_CUDA_ERROR(hipMemcpyFromSymbol((void*)&(t_seen),triangles_seen_count, sizeof(t_seen), 0, hipMemcpyDeviceToHost));
//HANDLE_CUDA_ERROR(hipMemcpyFromSymbol((void*)&(v_count), voxel_count, sizeof(v_count), 0, hipMemcpyDeviceToHost));
//printf("We've seen %llu triangles on the GPU \n", t_seen);
//printf("We've found %llu voxels on the GPU \n", v_count);
// Destroy timers
checkCudaErrors(hipEventDestroy(start_vox));
checkCudaErrors(hipEventDestroy(stop_vox));
}
| 8f3611f62295faf0f1fde49367fcd07580c3cfb3.cu | #include "voxelize.cuh"
// CUDA Global Memory variables
//__device__ size_t voxel_count = 0; // How many voxels did we count
//__device__ size_t triangles_seen_count = 0; // Sanity check
__constant__ uint32_t morton256_x[256];
__constant__ uint32_t morton256_y[256];
__constant__ uint32_t morton256_z[256];
// Encode morton code using LUT table
__device__ inline uint64_t mortonEncode_LUT(unsigned int x, unsigned int y, unsigned int z){
uint64_t answer = 0;
answer = morton256_z[(z >> 16) & 0xFF] |
morton256_y[(y >> 16) & 0xFF] |
morton256_x[(x >> 16) & 0xFF];
answer = answer << 48 |
morton256_z[(z >> 8) & 0xFF] |
morton256_y[(y >> 8) & 0xFF] |
morton256_x[(x >> 8) & 0xFF];
answer = answer << 24 |
morton256_z[(z)& 0xFF] |
morton256_y[(y)& 0xFF] |
morton256_x[(x)& 0xFF];
return answer;
}
// Possible optimization: buffer bitsets (for now: Disabled because too much overhead)
//struct bufferedBitSetter{
// unsigned int* voxel_table;
// size_t current_int_location;
// unsigned int current_mask;
//
// __device__ __inline__ bufferedBitSetter(unsigned int* voxel_table, size_t index) :
// voxel_table(voxel_table), current_mask(0) {
// current_int_location = int(index / 32.0f);
// }
//
// __device__ __inline__ void setBit(size_t index){
// size_t new_int_location = int(index / 32.0f);
// if (current_int_location != new_int_location){
// flush();
// current_int_location = new_int_location;
// }
// unsigned int bit_pos = 31 - (unsigned int)(int(index) % 32);
// current_mask = current_mask | (1 << bit_pos);
// }
//
// __device__ __inline__ void flush(){
// if (current_mask != 0){
// atomicOr(&(voxel_table[current_int_location]), current_mask);
// }
// }
//};
// Possible optimization: check bit before you set it - don't need to do atomic operation if it's already set to 1
// For now: overhead, so it seems
//__device__ __inline__ bool checkBit(unsigned int* voxel_table, size_t index){
// size_t int_location = index / size_t(32);
// unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
// return ((voxel_table[int_location]) & (1 << bit_pos));
//}
// Set a bit in the giant voxel table. This involves doing an atomic operation on a 32-bit word in memory.
// Blocking other threads writing to it for a very short time
__device__ __inline__ void setBit(unsigned int* voxel_table, size_t index){
size_t int_location = index / size_t(32);
unsigned int bit_pos = size_t(31) - (index % size_t(32)); // we count bit positions RtL, but array indices LtR
unsigned int mask = 1 << bit_pos;
atomicOr(&(voxel_table[int_location]), mask);
}
// Main triangle voxelization method
__global__ void voxelize_triangle(voxinfo info, float* triangle_data, unsigned int* voxel_table, bool morton_order){
size_t thread_id = threadIdx.x + blockIdx.x * blockDim.x;
size_t stride = blockDim.x * gridDim.x;
// Common variables used in the voxelization process
glm::vec3 delta_p(info.unit.x, info.unit.y, info.unit.z);
glm::vec3 c(0.0f, 0.0f, 0.0f); // critical point
glm::vec3 grid_max(info.gridsize.x - 1, info.gridsize.y - 1, info.gridsize.z - 1); // grid max (grid runs from 0 to gridsize-1)
while (thread_id < info.n_triangles){ // every thread works on specific triangles in its stride
size_t t = thread_id * 9; // triangle contains 9 vertices
// COMPUTE COMMON TRIANGLE PROPERTIES
// Move vertices to origin using bbox
glm::vec3 v0 = glm::vec3(triangle_data[t], triangle_data[t + 1], triangle_data[t + 2]) - info.bbox.min;
glm::vec3 v1 = glm::vec3(triangle_data[t + 3], triangle_data[t + 4], triangle_data[t + 5]) - info.bbox.min;
glm::vec3 v2 = glm::vec3(triangle_data[t + 6], triangle_data[t + 7], triangle_data[t + 8]) - info.bbox.min;
// Edge vectors
glm::vec3 e0 = v1 - v0;
glm::vec3 e1 = v2 - v1;
glm::vec3 e2 = v0 - v2;
// Normal vector pointing up from the triangle
glm::vec3 n = glm::normalize(glm::cross(e0, e1));
// COMPUTE TRIANGLE BBOX IN GRID
// Triangle bounding box in world coordinates is min(v0,v1,v2) and max(v0,v1,v2)
AABox<glm::vec3> t_bbox_world(glm::min(v0, glm::min(v1, v2)), glm::max(v0, glm::max(v1, v2)));
// Triangle bounding box in voxel grid coordinates is the world bounding box divided by the grid unit vector
AABox<glm::ivec3> t_bbox_grid;
t_bbox_grid.min = glm::clamp(t_bbox_world.min / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);
t_bbox_grid.max = glm::clamp(t_bbox_world.max / info.unit, glm::vec3(0.0f, 0.0f, 0.0f), grid_max);
// PREPARE PLANE TEST PROPERTIES
if (n.x > 0.0f) { c.x = info.unit.x; }
if (n.y > 0.0f) { c.y = info.unit.y; }
if (n.z > 0.0f) { c.z = info.unit.z; }
float d1 = glm::dot(n, (c - v0));
float d2 = glm::dot(n, ((delta_p - c) - v0));
// PREPARE PROJECTION TEST PROPERTIES
// XY plane
glm::vec2 n_xy_e0(-1.0f*e0.y, e0.x);
glm::vec2 n_xy_e1(-1.0f*e1.y, e1.x);
glm::vec2 n_xy_e2(-1.0f*e2.y, e2.x);
if (n.z < 0.0f) {
n_xy_e0 = -n_xy_e0;
n_xy_e1 = -n_xy_e1;
n_xy_e2 = -n_xy_e2;
}
float d_xy_e0 = (-1.0f * glm::dot(n_xy_e0, glm::vec2(v0.x, v0.y))) + glm::max(0.0f, info.unit.x*n_xy_e0[0]) + glm::max(0.0f, info.unit.y*n_xy_e0[1]);
float d_xy_e1 = (-1.0f * glm::dot(n_xy_e1, glm::vec2(v1.x, v1.y))) + glm::max(0.0f, info.unit.x*n_xy_e1[0]) + glm::max(0.0f, info.unit.y*n_xy_e1[1]);
float d_xy_e2 = (-1.0f * glm::dot(n_xy_e2, glm::vec2(v2.x, v2.y))) + glm::max(0.0f, info.unit.x*n_xy_e2[0]) + glm::max(0.0f, info.unit.y*n_xy_e2[1]);
// YZ plane
glm::vec2 n_yz_e0(-1.0f*e0.z, e0.y);
glm::vec2 n_yz_e1(-1.0f*e1.z, e1.y);
glm::vec2 n_yz_e2(-1.0f*e2.z, e2.y);
if (n.x < 0.0f) {
n_yz_e0 = -n_yz_e0;
n_yz_e1 = -n_yz_e1;
n_yz_e2 = -n_yz_e2;
}
float d_yz_e0 = (-1.0f * glm::dot(n_yz_e0, glm::vec2(v0.y, v0.z))) + glm::max(0.0f, info.unit.y*n_yz_e0[0]) + glm::max(0.0f, info.unit.z*n_yz_e0[1]);
float d_yz_e1 = (-1.0f * glm::dot(n_yz_e1, glm::vec2(v1.y, v1.z))) + glm::max(0.0f, info.unit.y*n_yz_e1[0]) + glm::max(0.0f, info.unit.z*n_yz_e1[1]);
float d_yz_e2 = (-1.0f * glm::dot(n_yz_e2, glm::vec2(v2.y, v2.z))) + glm::max(0.0f, info.unit.y*n_yz_e2[0]) + glm::max(0.0f, info.unit.z*n_yz_e2[1]);
// ZX plane
glm::vec2 n_zx_e0(-1.0f*e0.x, e0.z);
glm::vec2 n_zx_e1(-1.0f*e1.x, e1.z);
glm::vec2 n_zx_e2(-1.0f*e2.x, e2.z);
if (n.y < 0.0f) {
n_zx_e0 = -n_zx_e0;
n_zx_e1 = -n_zx_e1;
n_zx_e2 = -n_zx_e2;
}
float d_xz_e0 = (-1.0f * glm::dot(n_zx_e0, glm::vec2(v0.z, v0.x))) + glm::max(0.0f, info.unit.x*n_zx_e0[0]) + glm::max(0.0f, info.unit.z*n_zx_e0[1]);
float d_xz_e1 = (-1.0f * glm::dot(n_zx_e1, glm::vec2(v1.z, v1.x))) + glm::max(0.0f, info.unit.x*n_zx_e1[0]) + glm::max(0.0f, info.unit.z*n_zx_e1[1]);
float d_xz_e2 = (-1.0f * glm::dot(n_zx_e2, glm::vec2(v2.z, v2.x))) + glm::max(0.0f, info.unit.x*n_zx_e2[0]) + glm::max(0.0f, info.unit.z*n_zx_e2[1]);
// test possible grid boxes for overlap
for (int z = t_bbox_grid.min.z; z <= t_bbox_grid.max.z; z++){
for (int y = t_bbox_grid.min.y; y <= t_bbox_grid.max.y; y++){
for (int x = t_bbox_grid.min.x; x <= t_bbox_grid.max.x; x++){
// size_t location = x + (y*info.gridsize) + (z*info.gridsize*info.gridsize);
// if (checkBit(voxel_table, location)){ continue; }
// TRIANGLE PLANE THROUGH BOX TEST
glm::vec3 p(x*info.unit.x, y*info.unit.y, z*info.unit.z);
float nDOTp = glm::dot(n, p);
if ((nDOTp + d1) * (nDOTp + d2) > 0.0f){ continue; }
// PROJECTION TESTS
// XY
glm::vec2 p_xy(p.x, p.y);
if ((glm::dot(n_xy_e0, p_xy) + d_xy_e0) < 0.0f){ continue; }
if ((glm::dot(n_xy_e1, p_xy) + d_xy_e1) < 0.0f){ continue; }
if ((glm::dot(n_xy_e2, p_xy) + d_xy_e2) < 0.0f){ continue; }
// YZ
glm::vec2 p_yz(p.y, p.z);
if ((glm::dot(n_yz_e0, p_yz) + d_yz_e0) < 0.0f){ continue; }
if ((glm::dot(n_yz_e1, p_yz) + d_yz_e1) < 0.0f){ continue; }
if ((glm::dot(n_yz_e2, p_yz) + d_yz_e2) < 0.0f){ continue; }
// XZ
glm::vec2 p_zx(p.z, p.x);
if ((glm::dot(n_zx_e0, p_zx) + d_xz_e0) < 0.0f){ continue; }
if ((glm::dot(n_zx_e1, p_zx) + d_xz_e1) < 0.0f){ continue; }
if ((glm::dot(n_zx_e2, p_zx) + d_xz_e2) < 0.0f){ continue; }
//atomicAdd(&voxel_count, 1);
if (morton_order){
size_t location = mortonEncode_LUT(x, y, z);
setBit(voxel_table, location);
} else {
size_t location = x + (y*info.gridsize.y) + (z*info.gridsize.y*info.gridsize.z);
setBit(voxel_table, location);
}
continue;
}
}
}
// sanity check: atomically count triangles
//atomicAdd(&triangles_seen_count, 1);
thread_id += stride;
}
}
void voxelize(const voxinfo& v, float* triangle_data, unsigned int* vtable, bool useThrustPath, bool morton_code) {
float elapsedTime;
// These are only used when we're not using UNIFIED memory
unsigned int* dev_vtable; // DEVICE pointer to voxel_data
size_t vtable_size; // vtable size
// Create timers, set start time
cudaEvent_t start_vox, stop_vox;
checkCudaErrors(cudaEventCreate(&start_vox));
checkCudaErrors(cudaEventCreate(&stop_vox));
// Copy morton LUT if we're encoding to morton
if (morton_code){
checkCudaErrors(cudaMemcpyToSymbol(morton256_x, host_morton256_x, 256 * sizeof(uint32_t)));
checkCudaErrors(cudaMemcpyToSymbol(morton256_y, host_morton256_y, 256 * sizeof(uint32_t)));
checkCudaErrors(cudaMemcpyToSymbol(morton256_z, host_morton256_z, 256 * sizeof(uint32_t)));
}
// Estimate best block and grid size using CUDA Occupancy Calculator
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, voxelize_triangle, 0, 0);
// Round up according to array size
gridSize = (v.n_triangles + blockSize - 1) / blockSize;
if (useThrustPath) { // We're not using UNIFIED memory
vtable_size = ((size_t)v.gridsize.x * v.gridsize.y * v.gridsize.z) / (size_t) 8.0;
fprintf(stdout, "[Voxel Grid] Allocating %llu kB of DEVICE memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(cudaMalloc(&dev_vtable, vtable_size));
checkCudaErrors(cudaMemset(dev_vtable, 0, vtable_size));
// Start voxelization
checkCudaErrors(cudaEventRecord(start_vox, 0));
voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, dev_vtable, morton_code);
}
else { // UNIFIED MEMORY
checkCudaErrors(cudaEventRecord(start_vox, 0));
voxelize_triangle << <gridSize, blockSize >> > (v, triangle_data, vtable, morton_code);
}
cudaDeviceSynchronize();
checkCudaErrors(cudaEventRecord(stop_vox, 0));
checkCudaErrors(cudaEventSynchronize(stop_vox));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start_vox, stop_vox));
printf("[Voxelization] GPU time: %3.1f ms\n", elapsedTime);
// If we're not using UNIFIED memory, copy the voxel table back and free all
if (useThrustPath){
fprintf(stdout, "[Voxel Grid] Copying %llu kB to page-locked HOST memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(cudaMemcpy((void*)vtable, dev_vtable, vtable_size, cudaMemcpyDefault));
fprintf(stdout, "[Voxel Grid] Freeing %llu kB of DEVICE memory\n", size_t(vtable_size / 1024.0f));
checkCudaErrors(cudaFree(dev_vtable));
}
// SANITY CHECKS
//size_t t_seen, v_count;
//HANDLE_CUDA_ERROR(cudaMemcpyFromSymbol((void*)&(t_seen),triangles_seen_count, sizeof(t_seen), 0, cudaMemcpyDeviceToHost));
//HANDLE_CUDA_ERROR(cudaMemcpyFromSymbol((void*)&(v_count), voxel_count, sizeof(v_count), 0, cudaMemcpyDeviceToHost));
//printf("We've seen %llu triangles on the GPU \n", t_seen);
//printf("We've found %llu voxels on the GPU \n", v_count);
// Destroy timers
checkCudaErrors(cudaEventDestroy(start_vox));
checkCudaErrors(cudaEventDestroy(stop_vox));
}
|
cc67c8f8fba19c7ff9efd91305c84e7d8c1e4045.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mean_array_kernel(float *src, int size, float alpha, float *avg)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= size) return;
avg[i] = avg[i] * (1 - alpha) + src[i] * alpha;
src[i] = avg[i];
} | cc67c8f8fba19c7ff9efd91305c84e7d8c1e4045.cu | #include "includes.h"
__global__ void mean_array_kernel(float *src, int size, float alpha, float *avg)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= size) return;
avg[i] = avg[i] * (1 - alpha) + src[i] * alpha;
src[i] = avg[i];
} |
c0af4519696e3185ba22709ce36bf1032edd43d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "parallax/flowSequence.hpp"
#include "./kernels/patchDifferenceFunction.cu"
#include "backend/common/vectorOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "cuda/error.hpp"
#include "cuda/util.hpp"
#include "gpu/image/sampling.hpp"
#include "gpu/image/imageOps.hpp"
#include "gpu/image/blur.hpp"
#include "gpu/stream.hpp"
#include <string.h>
namespace VideoStitch {
namespace Core {
#define WARPER_BLOCK_SIZE_X 16
#define WARPER_BLOCK_SIZE_Y 16
#define WARPER_BLOCK_SIZE_Z 16
__global__ void weightedAvgFlowWarpKernel(const int2 size, const int frameId, const int frameCount,
const float sigmaTime, const float* frames, const float2* inputFlows,
float2* outputFlow) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size.x && y < size.y) {
const int index = y * size.x + x;
float2 avgFlow = make_float2(0, 0);
float totalWeight = 0.0;
for (int t = 0; t < frameCount; t++)
if (frames[t] >= 0) {
// Now calculate the distance of time
float distTime = float(frames[t] - frames[frameId]) / frameCount;
float weightTime = exp(-abs(distTime * distTime * sigmaTime));
const float2 inputFlow = inputFlows[t * size.x * size.y + index];
if (inputFlow.x != INVALID_FLOW_VALUE) {
avgFlow += inputFlow * weightTime;
totalWeight += weightTime;
}
}
if (totalWeight > 0) {
outputFlow[index] = avgFlow / totalWeight;
} else {
outputFlow[index] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
}
}
}
Status FlowSequence::regularizeFlowTemporally(const std::string& name, const frameid_t frame, const int2 size,
const int2 offset, GPU::Buffer<float2> flow, GPU::Stream gpuStream) {
// Cache the input flow
FAIL_RETURN(cacheBuffer<float2>(frame, name, size, offset, flow, gpuStream));
TypedCached<float2>* cache = dynamic_cast<TypedCached<float2>*>(getFlowCachedBuffer(name).get());
if (!cache) {
return {Origin::ImageFlow, ErrType::InvalidConfiguration, "FlowSequence::cache is not valid"};
}
const int frameIndex = getFrameIndex(frame);
if (frameIndex < 0) {
return {Origin::ImageFlow, ErrType::InvalidConfiguration, "FlowSequence::frameindex < 0"};
}
// Now compute the weighted average flow
// Now make the flow as stable as possible from previous computation
hipStream_t stream = gpuStream.get();
dim3 dimBlock(WARPER_BLOCK_SIZE_X, WARPER_BLOCK_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.x, dimBlock.y), 1);
hipLaunchKernelGGL(( weightedAvgFlowWarpKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, size, frameIndex, (int)getFrames().numElements(), 5,
getFrames().get(), cache->getBuffer().get(), flow.get());
return CUDA_STATUS;
}
} // namespace Core
} // namespace VideoStitch
| c0af4519696e3185ba22709ce36bf1032edd43d7.cu | // Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "parallax/flowSequence.hpp"
#include "./kernels/patchDifferenceFunction.cu"
#include "backend/common/vectorOps.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "cuda/error.hpp"
#include "cuda/util.hpp"
#include "gpu/image/sampling.hpp"
#include "gpu/image/imageOps.hpp"
#include "gpu/image/blur.hpp"
#include "gpu/stream.hpp"
#include <string.h>
namespace VideoStitch {
namespace Core {
#define WARPER_BLOCK_SIZE_X 16
#define WARPER_BLOCK_SIZE_Y 16
#define WARPER_BLOCK_SIZE_Z 16
__global__ void weightedAvgFlowWarpKernel(const int2 size, const int frameId, const int frameCount,
const float sigmaTime, const float* frames, const float2* inputFlows,
float2* outputFlow) {
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < size.x && y < size.y) {
const int index = y * size.x + x;
float2 avgFlow = make_float2(0, 0);
float totalWeight = 0.0;
for (int t = 0; t < frameCount; t++)
if (frames[t] >= 0) {
// Now calculate the distance of time
float distTime = float(frames[t] - frames[frameId]) / frameCount;
float weightTime = exp(-abs(distTime * distTime * sigmaTime));
const float2 inputFlow = inputFlows[t * size.x * size.y + index];
if (inputFlow.x != INVALID_FLOW_VALUE) {
avgFlow += inputFlow * weightTime;
totalWeight += weightTime;
}
}
if (totalWeight > 0) {
outputFlow[index] = avgFlow / totalWeight;
} else {
outputFlow[index] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
}
}
}
Status FlowSequence::regularizeFlowTemporally(const std::string& name, const frameid_t frame, const int2 size,
const int2 offset, GPU::Buffer<float2> flow, GPU::Stream gpuStream) {
// Cache the input flow
FAIL_RETURN(cacheBuffer<float2>(frame, name, size, offset, flow, gpuStream));
TypedCached<float2>* cache = dynamic_cast<TypedCached<float2>*>(getFlowCachedBuffer(name).get());
if (!cache) {
return {Origin::ImageFlow, ErrType::InvalidConfiguration, "FlowSequence::cache is not valid"};
}
const int frameIndex = getFrameIndex(frame);
if (frameIndex < 0) {
return {Origin::ImageFlow, ErrType::InvalidConfiguration, "FlowSequence::frameindex < 0"};
}
// Now compute the weighted average flow
// Now make the flow as stable as possible from previous computation
cudaStream_t stream = gpuStream.get();
dim3 dimBlock(WARPER_BLOCK_SIZE_X, WARPER_BLOCK_SIZE_Y, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.x, dimBlock.y), 1);
weightedAvgFlowWarpKernel<<<dimGrid, dimBlock, 0, stream>>>(size, frameIndex, (int)getFrames().numElements(), 5,
getFrames().get(), cache->getBuffer().get(), flow.get());
return CUDA_STATUS;
}
} // namespace Core
} // namespace VideoStitch
|
ad0764ecc8a566cd1bea6397835728b50eb677bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "Logging.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
void fatal(const char *format, ...)
{
va_list args;
fprintf(stderr, "Erreur fatale\n");
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
putc('\n', stderr);
exit(1);
}
__host__
void cudaCheck(hipError_t code) {
if(code != hipSuccess) {
fatal("Cuda error: %s.\n", hipGetErrorString(code));
}
} | ad0764ecc8a566cd1bea6397835728b50eb677bd.cu | #include "Logging.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
void fatal(const char *format, ...)
{
va_list args;
fprintf(stderr, "Erreur fatale\n");
va_start(args, format);
vfprintf(stderr, format, args);
va_end(args);
putc('\n', stderr);
exit(1);
}
__host__
void cudaCheck(cudaError_t code) {
if(code != cudaSuccess) {
fatal("Cuda error: %s.\n", cudaGetErrorString(code));
}
} |
ab3d98be7b3fac4f8f9868eaa831b0caec0a2453.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <string>
#include <ctime>
#include <cstdio>
#include "device_kernel_wrapper.h"
#include "datatypes.h"
#include "common.h"
#include "memory_scheduler.h"
__global__
void device_chain_tiled(
return_dt *ret, int n, const anchor_dt *a,
control_dt *control, score_dt **max_tracker, parent_dt **j_tracker,
int max_dist_x, int max_dist_y, int bw);
__host__
void device_chain_kernel_wrapper(
std::vector<control_dt> &cont,
std::vector<anchor_dt> &arg,
std::vector<return_dt> &ret,
int max_dist_x, int max_dist_y, int bw)
{
auto batch_count = cont.size() / PE_NUM;
control_dt *h_control;
anchor_dt *h_arg;
return_dt *h_ret;
hipHostMalloc(&h_control, cont.size() * sizeof(control_dt));
hipHostMalloc(&h_arg, arg.size() * sizeof(anchor_dt));
hipHostMalloc(&h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
ret.resize(batch_count * TILE_SIZE * PE_NUM);
memcpy(h_control, cont.data(), cont.size() * sizeof(control_dt));
memcpy(h_arg, arg.data(), arg.size() * sizeof(anchor_dt));
struct timespec start, end;
clock_gettime(CLOCK_BOOTTIME, &start);
control_dt *d_control;
anchor_dt *d_arg;
return_dt *d_ret;
    // persistent storage
score_dt *d_max_tracker[PE_NUM];
parent_dt *d_j_tracker[PE_NUM];
score_dt **d_d_max_tracker;
parent_dt **d_d_j_tracker;
hipMalloc(&d_control, cont.size() * sizeof(control_dt));
hipMalloc(&d_arg, arg.size() * sizeof(anchor_dt));
hipMalloc(&d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
for (auto pe = 0; pe < PE_NUM; pe++) {
hipMalloc(&d_max_tracker[pe], BACK_SEARCH_COUNT_GPU * sizeof(score_dt));
hipMalloc(&d_j_tracker[pe], BACK_SEARCH_COUNT_GPU * sizeof(parent_dt));
}
hipMalloc(&d_d_max_tracker, PE_NUM * sizeof(score_dt *));
hipMalloc(&d_d_j_tracker, PE_NUM * sizeof(parent_dt *));
hipMemcpy(d_control, h_control,
cont.size() * sizeof(control_dt), hipMemcpyHostToDevice);
hipMemcpy(d_arg, h_arg,
arg.size() * sizeof(anchor_dt), hipMemcpyHostToDevice);
hipMemcpy(d_d_max_tracker, d_max_tracker,
PE_NUM * sizeof(score_dt *), hipMemcpyHostToDevice);
hipMemcpy(d_d_j_tracker, d_j_tracker,
PE_NUM * sizeof(parent_dt *), hipMemcpyHostToDevice);
hipStream_t streams[STREAM_NUM];
for (auto i = 0; i < STREAM_NUM; i++) {
hipStreamCreate(&streams[i]);
}
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds to transfer in data\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
for (auto batch = 0; batch < batch_count; batch++) {
for (auto st = 0; st < STREAM_NUM; st++) {
hipLaunchKernelGGL(( device_chain_tiled), dim3(BLOCK_NUM),
dim3(THREAD_FACTOR * BACK_SEARCH_COUNT_GPU),
0, streams[st],
d_ret + batch * PE_NUM * TILE_SIZE +
st * BLOCK_NUM * THREAD_FACTOR * TILE_SIZE,
TILE_SIZE,
d_arg + batch * PE_NUM * TILE_SIZE_ACTUAL +
st * BLOCK_NUM * THREAD_FACTOR * TILE_SIZE_ACTUAL,
d_control + batch * PE_NUM +
st * BLOCK_NUM * THREAD_FACTOR,
d_d_max_tracker + st * BLOCK_NUM * THREAD_FACTOR,
d_d_j_tracker + st * BLOCK_NUM * THREAD_FACTOR,
max_dist_x, max_dist_y, bw);
}
}
for (auto i = 0; i < STREAM_NUM; i++) {
hipStreamSynchronize(streams[i]);
}
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds to transfer in and execute\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
hipMemcpy(h_ret, d_ret,
batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt),
hipMemcpyDeviceToHost);
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds for end-to-end\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
memcpy(ret.data(), h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
}
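// Editor's note (hedged sketch, not part of the original source): the wrapper above never
// releases its device buffers, pinned host buffers, or streams; a conservative epilogue
// before returning would be roughly:
//     for (auto i = 0; i < STREAM_NUM; i++) hipStreamDestroy(streams[i]);
//     hipFree(d_control); hipFree(d_arg); hipFree(d_ret);
//     for (auto pe = 0; pe < PE_NUM; pe++) { hipFree(d_max_tracker[pe]); hipFree(d_j_tracker[pe]); }
//     hipFree(d_d_max_tracker); hipFree(d_d_j_tracker);
//     hipHostFree(h_control); hipHostFree(h_arg); hipHostFree(h_ret);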
| ab3d98be7b3fac4f8f9868eaa831b0caec0a2453.cu | #include <vector>
#include <string>
#include <ctime>
#include <cstdio>
#include "device_kernel_wrapper.h"
#include "datatypes.h"
#include "common.h"
#include "memory_scheduler.h"
__global__
void device_chain_tiled(
return_dt *ret, int n, const anchor_dt *a,
control_dt *control, score_dt **max_tracker, parent_dt **j_tracker,
int max_dist_x, int max_dist_y, int bw);
__host__
void device_chain_kernel_wrapper(
std::vector<control_dt> &cont,
std::vector<anchor_dt> &arg,
std::vector<return_dt> &ret,
int max_dist_x, int max_dist_y, int bw)
{
auto batch_count = cont.size() / PE_NUM;
control_dt *h_control;
anchor_dt *h_arg;
return_dt *h_ret;
cudaMallocHost(&h_control, cont.size() * sizeof(control_dt));
cudaMallocHost(&h_arg, arg.size() * sizeof(anchor_dt));
cudaMallocHost(&h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
ret.resize(batch_count * TILE_SIZE * PE_NUM);
memcpy(h_control, cont.data(), cont.size() * sizeof(control_dt));
memcpy(h_arg, arg.data(), arg.size() * sizeof(anchor_dt));
struct timespec start, end;
clock_gettime(CLOCK_BOOTTIME, &start);
control_dt *d_control;
anchor_dt *d_arg;
return_dt *d_ret;
    // persistent storage
score_dt *d_max_tracker[PE_NUM];
parent_dt *d_j_tracker[PE_NUM];
score_dt **d_d_max_tracker;
parent_dt **d_d_j_tracker;
cudaMalloc(&d_control, cont.size() * sizeof(control_dt));
cudaMalloc(&d_arg, arg.size() * sizeof(anchor_dt));
cudaMalloc(&d_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
for (auto pe = 0; pe < PE_NUM; pe++) {
cudaMalloc(&d_max_tracker[pe], BACK_SEARCH_COUNT_GPU * sizeof(score_dt));
cudaMalloc(&d_j_tracker[pe], BACK_SEARCH_COUNT_GPU * sizeof(parent_dt));
}
cudaMalloc(&d_d_max_tracker, PE_NUM * sizeof(score_dt *));
cudaMalloc(&d_d_j_tracker, PE_NUM * sizeof(parent_dt *));
cudaMemcpy(d_control, h_control,
cont.size() * sizeof(control_dt), cudaMemcpyHostToDevice);
cudaMemcpy(d_arg, h_arg,
arg.size() * sizeof(anchor_dt), cudaMemcpyHostToDevice);
cudaMemcpy(d_d_max_tracker, d_max_tracker,
PE_NUM * sizeof(score_dt *), cudaMemcpyHostToDevice);
cudaMemcpy(d_d_j_tracker, d_j_tracker,
PE_NUM * sizeof(parent_dt *), cudaMemcpyHostToDevice);
cudaStream_t streams[STREAM_NUM];
for (auto i = 0; i < STREAM_NUM; i++) {
cudaStreamCreate(&streams[i]);
}
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds to transfer in data\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
for (auto batch = 0; batch < batch_count; batch++) {
for (auto st = 0; st < STREAM_NUM; st++) {
device_chain_tiled<<<BLOCK_NUM,
THREAD_FACTOR * BACK_SEARCH_COUNT_GPU,
0, streams[st]>>>(
d_ret + batch * PE_NUM * TILE_SIZE +
st * BLOCK_NUM * THREAD_FACTOR * TILE_SIZE,
TILE_SIZE,
d_arg + batch * PE_NUM * TILE_SIZE_ACTUAL +
st * BLOCK_NUM * THREAD_FACTOR * TILE_SIZE_ACTUAL,
d_control + batch * PE_NUM +
st * BLOCK_NUM * THREAD_FACTOR,
d_d_max_tracker + st * BLOCK_NUM * THREAD_FACTOR,
d_d_j_tracker + st * BLOCK_NUM * THREAD_FACTOR,
max_dist_x, max_dist_y, bw);
}
}
for (auto i = 0; i < STREAM_NUM; i++) {
cudaStreamSynchronize(streams[i]);
}
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds to transfer in and execute\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
cudaMemcpy(h_ret, d_ret,
batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt),
cudaMemcpyDeviceToHost);
clock_gettime(CLOCK_BOOTTIME, &end);
printf(" ***** kernel took %f seconds for end-to-end\n",
( end.tv_sec - start.tv_sec ) + ( end.tv_nsec - start.tv_nsec ) / 1E9);
memcpy(ret.data(), h_ret, batch_count * TILE_SIZE * PE_NUM * sizeof(return_dt));
}
|
4be3005aa8af30f389f83bf7bc08a73f2980ae8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
float* partX;
float* partY;
float* partZ;
float* partFrcX;
float* partFrcY;
float* partFrcZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
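// Editor's sketch (assumption, not part of the original source): the host is expected to
// fill a cribSheet with the particle count and with device pointers taken from mirrored
// arrays, then push the whole package into the constant cache in one call; the names
// cnstage and particleXcrd below are hypothetical.
//
//   cribSheet cnstage;
//   cnstage.nparticle = np;
//   cnstage.partX     = particleXcrd.DevcData;   // device pointer from a gpuFloat mirror
//   ...
//   hipMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));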
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredFloat: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
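// Editor's sketch (assumption): the intended round trip with the mirrored types above;
// CreateGpuFloat is defined later in this file, and UploadGpuFloat / DestroyGpuFloat are
// assumed to mirror the gpuInt helpers below for the float case.
//
//   gpuFloat xcrd = CreateGpuFloat(nparticle, 1);   // pinned host buffer plus device buffer
//   for (int i = 0; i < nparticle; i++) {
//     xcrd.HostData[i] = ...;                       // fill on the host
//   }
//   UploadGpuFloat(&xcrd);                          // HostData -> DevcData
//   ...                                             // kernels then read xcrd.DevcData
//   DestroyGpuFloat(&xcrd);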
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
int i;
// Each thread must have __shared__ memory, visible by other threads,
// to store information about one particle it has been assigned to read
// and manage. Each array has as many elements as there are threads in
// the block. If the launch parameters were to change, all of these
// array sizes should change as well.
__shared__ volatile float pX[512], pY[512], pZ[512], pQ[512];
__shared__ volatile float tX[512], tY[512], tZ[512], tQ[512];
__shared__ volatile float sfpX[512], sfpY[512], sfpZ[512];
__shared__ volatile float sftX[512], sftY[512], sftZ[512];
// Treat warps as the irreducible units, not threads. A warp is a group
// of 32 threads. Threads 0-31 are, by convention, warp 0. Threads
// 32-63 are warp 1, and so on. The thread's warp and lane within the
// warp become relevant to its task. Every thread will store these two
// pieces of information in its registers for the duration of the kernel.
int warpIdx = threadIdx.x / 32;
int tgx = (threadIdx.x & 31);
  // Initialize forces within the same kernel.  Because this kernel
  // runs in only one block, every particle's force can be zeroed here
  // before any accumulation begins.
i = threadIdx.x;
while (i < cSh.nparticle) {
cSh.partFrcX[i] = (float)0.0;
cSh.partFrcY[i] = (float)0.0;
cSh.partFrcZ[i] = (float)0.0;
i += blockDim.x;
}
__syncthreads();
// A more advanced way, using L1 __shared__ memory
float qq = (float)0.0;
int nstripes = (cSh.nparticle + 31) / 32;
int bpos = nstripes - warpIdx - 1;
while (bpos >= 0) {
// Read 32 particles into memory, accumulate the forces on them,
// then write the results back to the device. If the thread
// would read a particle beyond the system's size, then set its
// position as dummy numbers which will not do terrible things
// if they get into calculations with real particles.
//
// NOTE HERE and BELOW: threadIdx.x = 32*warpIdx + tgx
//
// See note above... each thread is operating within a stripe of
// the problem. Accessing index threadIdx.x is integral to that.
int prtclIdx = 32*bpos + tgx;
if (prtclIdx < cSh.nparticle) {
pX[threadIdx.x] = cSh.partX[prtclIdx];
pY[threadIdx.x] = cSh.partY[prtclIdx];
pZ[threadIdx.x] = cSh.partZ[prtclIdx];
pQ[threadIdx.x] = cSh.partQ[prtclIdx];
}
else {
pX[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pY[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pZ[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pQ[threadIdx.x] = (float)0.0;
}
// Loop over all particle pairs in the lower half triangle as before
int tpos = 0;
while (tpos <= bpos) {
// Initialize particles as in the outer loop
int prtclIdx = 32*tpos + tgx;
if (prtclIdx < cSh.nparticle) {
tX[threadIdx.x] = cSh.partX[prtclIdx];
tY[threadIdx.x] = cSh.partY[prtclIdx];
tZ[threadIdx.x] = cSh.partZ[prtclIdx];
tQ[threadIdx.x] = cSh.partQ[prtclIdx];
}
else {
// The offsets for particle positions must run along a different
// (parallel, but distinct) line so that not even dummy particles
// can ever occupy the same positions and cause a divide-by-zero.
// As before, the charge of the dummy particles is zero.
tX[threadIdx.x] = (float)10100.0 + (float)(prtclIdx);
tY[threadIdx.x] = (float)10200.0 + (float)(prtclIdx);
tZ[threadIdx.x] = (float)10300.0 + (float)(prtclIdx);
tQ[threadIdx.x] = (float)0.0;
}
// Initialize tile force accumulators
sfpX[threadIdx.x] = (float)0.0;
sfpY[threadIdx.x] = (float)0.0;
sfpZ[threadIdx.x] = (float)0.0;
sftX[threadIdx.x] = (float)0.0;
sftY[threadIdx.x] = (float)0.0;
sftZ[threadIdx.x] = (float)0.0;
// The tile is now ready. Compute 32 x 32 interactions.
// Tiles lying on the diagonal of the interaction matrix
// will do full work for half the results.
int imin = (bpos == tpos);
float anti2xCountingFactor = (bpos == tpos) ? (float)0.5 : (float)1.0;
for (i = imin; i < 32; i++) {
int j = tgx + i;
// Wrap j back so that it stays within the range [0, 32)
j -= (j >= 32) * 32;
// The value in position threadIdx.x of each __shared__
// memory array will now be compared to one of 32 other
// values from the array, in the range:
// [ (threadIdx.x / 32) * 32 :: ((threadIdx.x + 31) / 32) * 32 )
float dx = tX[warpIdx*32 + j] - pX[threadIdx.x];
float dy = tY[warpIdx*32 + j] - pY[threadIdx.x];
float dz = tZ[warpIdx*32 + j] - pZ[threadIdx.x];
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = anti2xCountingFactor *
tQ[warpIdx*32 + j] * pQ[threadIdx.x];
qq += qfac / sqrt(r2);
// This works because threadIdx.x is the only thread that will
// ever contribute to sfpX, and the tile is arranged so that,
// for a synchronized warp, only one thread will have a
// contribution to make to each element of sftX.
float fmag = qfac / (r2 * r);
sfpX[threadIdx.x ] += dx * fmag;
sftX[warpIdx*32 + j] -= dx * fmag;
sfpY[threadIdx.x ] += dy * fmag;
sftY[warpIdx*32 + j] -= dy * fmag;
sfpZ[threadIdx.x ] += dz * fmag;
sftZ[warpIdx*32 + j] -= dz * fmag;
__syncwarp();
}
// Contribute the tile force accumulations atomically to global memory
// (DRAM). This is only about 2x slower than atomic accumulation to
// __shared__. Accumulating things like this atomically to __shared__
// would make the kernel run only about 30% slower than accumulating
// them in an unsafe manner, willy-nilly. Fast atomics to global are
// a tremendous accomplishment by NVIDIA engineers!
//
// Note, the correspondence between 32*bpos + tgx or 32*tpos + tgx
// and 32*warpIdx + tgx. 32*warpIdx + tgx is, again, threadIdx.x.
atomicAdd(&cSh.partFrcX[32*bpos + tgx], sfpX[threadIdx.x]);
atomicAdd(&cSh.partFrcY[32*bpos + tgx], sfpY[threadIdx.x]);
atomicAdd(&cSh.partFrcZ[32*bpos + tgx], sfpZ[threadIdx.x]);
atomicAdd(&cSh.partFrcX[32*tpos + tgx], sftX[threadIdx.x]);
atomicAdd(&cSh.partFrcY[32*tpos + tgx], sftY[threadIdx.x]);
atomicAdd(&cSh.partFrcZ[32*tpos + tgx], sftZ[threadIdx.x]);
// Increment the tile counter
tpos++;
}
// Increment stripe counter
bpos -= blockDim.x / 32;
}
// Need to synchronize warps here as the next instructions will burn sfpX
__syncwarp();
// Reduce the energy contributions using __shared__. This cannibalizes
// the sfpX force accumulator, which is no longer needed. Then make a
// final contribution to the global array from only one thread per warp.
// This is another global memory traffic jam mitigation.
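  // Concretely, the loop below halves the number of active lanes each pass:
  // lanes 0-15 add in lanes 16-31, then lanes 0-7 add in lanes 8-15, and so
  // on, until lane 0 of each warp holds that warp's total energy contribution.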
sfpX[threadIdx.x] = qq;
for (i = 16; i >= 1; i /= 2) {
if (tgx < i) {
sfpX[threadIdx.x] += sfpX[threadIdx.x + i];
}
__syncwarp();
}
if (tgx == 0) {
atomicAdd(&cSh.Etot[0], sfpX[threadIdx.x]);
}
}
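// Scheduling note: with the launch used in main() (a single block of 512
// threads, i.e. 16 warps), warp w starts at stripe nstripes - 1 - w and then
// steps downward by blockDim.x / 32 stripes per outer iteration, so the
// stripes of the interaction matrix are statically interleaved across warps.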
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(int),
hipHostMallocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
hipMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
hipMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
            hipMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//           for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
hipHostMalloc((void **)&G.HostData, len * sizeof(float),
hipHostMallocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
hipMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
hipMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
hipHostFree(G->HostData);
}
else {
free(G->HostData);
}
hipFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
hipMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
hipMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  hipMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
            hipMemcpyDeviceToHost);
}
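// A minimal sketch of the intended lifecycle for these mirrored arrays, using
// only the functions defined above (the variable names are illustrative):
//   gpuFloat x = CreateGpuFloat(n, 1);  // pinned host buffer + device buffer
//   ...fill x.HostData on the CPU...
//   UploadGpuFloat(&x);                 // push host contents to the device
//   ...launch kernels that read or write x.DevcData...
//   DownloadGpuFloat(&x);               // pull device results back to the host
//   DestroyGpuFloat(&x);                // release both allocations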
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt particleTypes;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat particleXfrc, particleYfrc, particleZfrc;
gpuFloat etot;
// Create a small array of particles and populate it
particleTypes = CreateGpuInt(100000, 1);
particleXcoord = CreateGpuFloat(100000, 1);
particleYcoord = CreateGpuFloat(100000, 1);
particleZcoord = CreateGpuFloat(100000, 1);
particleXfrc = CreateGpuFloat(100000, 1);
particleYfrc = CreateGpuFloat(100000, 1);
particleZfrc = CreateGpuFloat(100000, 1);
particleCharge = CreateGpuFloat(100000, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(1, 1);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// Place many, many particles
np = 97913;
for (i = 0; i < np; i++) {
// Integer truncation would happen anyway, I'm just making it explicit
particleTypes.HostData[i] = (int)(8 * rand());
// Create some random coordinates (double-to-float conversion
// is happening here. On the GPU this can have performance
// impact, so keep an eye on the data types at all times!
particleXcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleYcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleZcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleCharge.HostData[i] = 0.5 - rand() / (double)RAND_MAX;
}
// CHECK
#if 0
int j;
double qq = 0.0;
for (i = 0; i < np; i++) {
for (j = 0; j < i; j++) {
double dx = particleXcoord.HostData[i] - particleXcoord.HostData[j];
double dy = particleYcoord.HostData[i] - particleYcoord.HostData[j];
double dz = particleZcoord.HostData[i] - particleZcoord.HostData[j];
double qfac = particleCharge.HostData[i] * particleCharge.HostData[j];
qq += qfac / sqrt(dx*dx + dy*dy + dz*dz);
}
}
printf("CPU result = %9.4lf\n", qq);
#endif
// END CHECK
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partFrcX = particleXfrc.DevcData;
cnstage.partFrcY = particleYfrc.DevcData;
cnstage.partFrcZ = particleZfrc.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device--note that forces are not getting
// uploaded, as the memory is already allocated. The forces will
// be initialized and computed on the device.
UploadGpuInt(&particleTypes);
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
hipMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Initialize energy and forces
etot.HostData[0] = 0.0;
UploadGpuFloat(&etot);
hipLaunchKernelGGL(( ParticleSimulator), dim3(1), dim3(512), 0, 0, );
// Download the total energy
DownloadGpuFloat(&etot);
printf("Total energy (%4d threads) = %10.4f\n", 512, etot.HostData[0]);
// Device synchronization
hipDeviceSynchronize();
return 0;
}
| 4be3005aa8af30f389f83bf7bc08a73f2980ae8c.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//-----------------------------------------------------------------------------
// GpuConstantsPackage: a struct to hold many constants (including pointers
// to allocated memory on the device) that can be
// uploaded all at once. Placing this in the "constants
// cache" is a convenient and performant way of handling
// constant information on the GPU.
//-----------------------------------------------------------------------------
struct GpuConstantsPackage {
int nparticle;
int* partType;
float* partX;
float* partY;
float* partZ;
float* partFrcX;
float* partFrcY;
float* partFrcZ;
float* partQ;
float* Etot;
};
typedef struct GpuConstantsPackage cribSheet;
// This device constant is available to all functions in this CUDA unit
__device__ __constant__ cribSheet cSh;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored int data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredInt {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
int* HostData; // Pointer to allocated memory on the host
int* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredInt gpuInt;
//-----------------------------------------------------------------------------
// GpuMirroredInt: a struct holding mirrored fp32 data on both the CPU and the
// GPU. Functions below will operate on this struct
// (because this isn't a workshop on C++)
//-----------------------------------------------------------------------------
struct GpuMirroredFloat {
int len; // Length of the array (again, this is not a C++ course)
int IsPinned; // "Pinned" memory is best for Host <= => GPU transfers.
// In fact, if non-pinned memory is transferred to the
// GPU from the host, a temporary allocation of pinned
// memory will be created and then destroyed. Pinned
// memory is not host-pageable, but the only performance
// implication is that creating lots of pinned memory
// may make it harder for the host OS to manage large
// memory jobs.
float* HostData; // Pointer to allocated memory on the host
float* DevcData; // Pointer to allocated memory on the GPU. Note that the
// host can know what the address of memory on the GPU
// is, but it cannot simply de-reference that pointer
// in host code.
};
typedef struct GpuMirroredFloat gpuFloat;
//-----------------------------------------------------------------------------
// ParticleSimulator: run a rudimentary simulation of particles
//-----------------------------------------------------------------------------
__global__ void ParticleSimulator()
{
int i;
// Each thread must have __shared__ memory, visible by other threads,
// to store information about one particle it has been assigned to read
// and manage. Each array has as many elements as there are threads in
// the block. If the launch parameters were to change, all of these
// array sizes should change as well.
__shared__ volatile float pX[512], pY[512], pZ[512], pQ[512];
__shared__ volatile float tX[512], tY[512], tZ[512], tQ[512];
__shared__ volatile float sfpX[512], sfpY[512], sfpZ[512];
__shared__ volatile float sftX[512], sftY[512], sftZ[512];
// Treat warps as the irreducible units, not threads. A warp is a group
// of 32 threads. Threads 0-31 are, by convention, warp 0. Threads
// 32-63 are warp 1, and so on. The thread's warp and lane within the
// warp become relevant to its task. Every thread will store these two
// pieces of information in its registers for the duration of the kernel.
int warpIdx = threadIdx.x / 32;
int tgx = (threadIdx.x & 31);
  // Initialize forces within the same kernel. Because this kernel
  // runs in only one block, a single thread-strided loop over
  // threadIdx.x covers every particle.
i = threadIdx.x;
while (i < cSh.nparticle) {
cSh.partFrcX[i] = (float)0.0;
cSh.partFrcY[i] = (float)0.0;
cSh.partFrcZ[i] = (float)0.0;
i += blockDim.x;
}
__syncthreads();
// A more advanced way, using L1 __shared__ memory
float qq = (float)0.0;
int nstripes = (cSh.nparticle + 31) / 32;
int bpos = nstripes - warpIdx - 1;
while (bpos >= 0) {
// Read 32 particles into memory, accumulate the forces on them,
// then write the results back to the device. If the thread
// would read a particle beyond the system's size, then set its
// position as dummy numbers which will not do terrible things
// if they get into calculations with real particles.
//
// NOTE HERE and BELOW: threadIdx.x = 32*warpIdx + tgx
//
// See note above... each thread is operating within a stripe of
// the problem. Accessing index threadIdx.x is integral to that.
int prtclIdx = 32*bpos + tgx;
if (prtclIdx < cSh.nparticle) {
pX[threadIdx.x] = cSh.partX[prtclIdx];
pY[threadIdx.x] = cSh.partY[prtclIdx];
pZ[threadIdx.x] = cSh.partZ[prtclIdx];
pQ[threadIdx.x] = cSh.partQ[prtclIdx];
}
else {
pX[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pY[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pZ[threadIdx.x] = (float)10000.0 + (float)(prtclIdx);
pQ[threadIdx.x] = (float)0.0;
}
// Loop over all particle pairs in the lower half triangle as before
int tpos = 0;
while (tpos <= bpos) {
// Initialize particles as in the outer loop
int prtclIdx = 32*tpos + tgx;
if (prtclIdx < cSh.nparticle) {
tX[threadIdx.x] = cSh.partX[prtclIdx];
tY[threadIdx.x] = cSh.partY[prtclIdx];
tZ[threadIdx.x] = cSh.partZ[prtclIdx];
tQ[threadIdx.x] = cSh.partQ[prtclIdx];
}
else {
// The offsets for particle positions must run along a different
// (parallel, but distinct) line so that not even dummy particles
// can ever occupy the same positions and cause a divide-by-zero.
// As before, the charge of the dummy particles is zero.
tX[threadIdx.x] = (float)10100.0 + (float)(prtclIdx);
tY[threadIdx.x] = (float)10200.0 + (float)(prtclIdx);
tZ[threadIdx.x] = (float)10300.0 + (float)(prtclIdx);
tQ[threadIdx.x] = (float)0.0;
}
// Initialize tile force accumulators
sfpX[threadIdx.x] = (float)0.0;
sfpY[threadIdx.x] = (float)0.0;
sfpZ[threadIdx.x] = (float)0.0;
sftX[threadIdx.x] = (float)0.0;
sftY[threadIdx.x] = (float)0.0;
sftZ[threadIdx.x] = (float)0.0;
// The tile is now ready. Compute 32 x 32 interactions.
// Tiles lying on the diagonal of the interaction matrix
// will do full work for half the results.
int imin = (bpos == tpos);
float anti2xCountingFactor = (bpos == tpos) ? (float)0.5 : (float)1.0;
for (i = imin; i < 32; i++) {
int j = tgx + i;
// Wrap j back so that it stays within the range [0, 32)
j -= (j >= 32) * 32;
// The value in position threadIdx.x of each __shared__
// memory array will now be compared to one of 32 other
// values from the array, in the range:
        // [ (threadIdx.x / 32) * 32 :: (threadIdx.x / 32) * 32 + 32 )
float dx = tX[warpIdx*32 + j] - pX[threadIdx.x];
float dy = tY[warpIdx*32 + j] - pY[threadIdx.x];
float dz = tZ[warpIdx*32 + j] - pZ[threadIdx.x];
float r2 = dx*dx + dy*dy + dz*dz;
float r = sqrt(r2);
float qfac = anti2xCountingFactor *
tQ[warpIdx*32 + j] * pQ[threadIdx.x];
qq += qfac / sqrt(r2);
// This works because threadIdx.x is the only thread that will
// ever contribute to sfpX, and the tile is arranged so that,
// for a synchronized warp, only one thread will have a
// contribution to make to each element of sftX.
float fmag = qfac / (r2 * r);
sfpX[threadIdx.x ] += dx * fmag;
sftX[warpIdx*32 + j] -= dx * fmag;
sfpY[threadIdx.x ] += dy * fmag;
sftY[warpIdx*32 + j] -= dy * fmag;
sfpZ[threadIdx.x ] += dz * fmag;
sftZ[warpIdx*32 + j] -= dz * fmag;
__syncwarp();
}
// Contribute the tile force accumulations atomically to global memory
// (DRAM). This is only about 2x slower than atomic accumulation to
// __shared__. Accumulating things like this atomically to __shared__
// would make the kernel run only about 30% slower than accumulating
// them in an unsafe manner, willy-nilly. Fast atomics to global are
// a tremendous accomplishment by NVIDIA engineers!
//
// Note, the correspondence between 32*bpos + tgx or 32*tpos + tgx
// and 32*warpIdx + tgx. 32*warpIdx + tgx is, again, threadIdx.x.
atomicAdd(&cSh.partFrcX[32*bpos + tgx], sfpX[threadIdx.x]);
atomicAdd(&cSh.partFrcY[32*bpos + tgx], sfpY[threadIdx.x]);
atomicAdd(&cSh.partFrcZ[32*bpos + tgx], sfpZ[threadIdx.x]);
atomicAdd(&cSh.partFrcX[32*tpos + tgx], sftX[threadIdx.x]);
atomicAdd(&cSh.partFrcY[32*tpos + tgx], sftY[threadIdx.x]);
atomicAdd(&cSh.partFrcZ[32*tpos + tgx], sftZ[threadIdx.x]);
// Increment the tile counter
tpos++;
}
// Increment stripe counter
bpos -= blockDim.x / 32;
}
// Need to synchronize warps here as the next instructions will burn sfpX
__syncwarp();
// Reduce the energy contributions using __shared__. This cannibalizes
// the sfpX force accumulator, which is no longer needed. Then make a
// final contribution to the global array from only one thread per warp.
// This is another global memory traffic jam mitigation.
sfpX[threadIdx.x] = qq;
for (i = 16; i >= 1; i /= 2) {
if (tgx < i) {
sfpX[threadIdx.x] += sfpX[threadIdx.x + i];
}
__syncwarp();
}
if (tgx == 0) {
atomicAdd(&cSh.Etot[0], sfpX[threadIdx.x]);
}
}
//-----------------------------------------------------------------------------
// CreateGpuInt: constructor function for allocating memory in a gpuInt
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
// for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuInt CreateGpuInt(int len, int pin)
{
gpuInt G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(int),
cudaHostAllocMapped);
}
else {
G.HostData = (int*)malloc(len * sizeof(int));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(int));
memset(G.HostData, 0, len * sizeof(int));
cudaMemset((void *)G.DevcData, 0, len * sizeof(int));
return G;
}
//-----------------------------------------------------------------------------
// DestroyGpuInt: destructor function for freeing memory in a gpuInt
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuInt(gpuInt *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuInt: upload an integer array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuInt(gpuInt *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(int),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuInt: download an integer array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuInt(gpuInt *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(int),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// CreateGpuFloat: constructor function for allocating memory in a gpuFloat
// instance.
//
// Arguments:
// len: the length of array to allocate
// pin: flag to have the memory pinned (non-pageable on the host side
//           for optimal transfer speed to the device)
//-----------------------------------------------------------------------------
gpuFloat CreateGpuFloat(int len, int pin)
{
gpuFloat G;
G.len = len;
G.IsPinned = pin;
// Now that the official length is recorded, upgrade the real length
// to the next convenient multiple of 128, so as to always allocate
// GPU memory in 512-byte blocks. This is for alignment purposes,
// and keeping host to device transfers in line.
len = ((len + 127) / 128) * 128;
if (pin == 1) {
cudaHostAlloc((void **)&G.HostData, len * sizeof(float),
cudaHostAllocMapped);
}
else {
G.HostData = (float*)malloc(len * sizeof(float));
}
cudaMalloc((void **)&G.DevcData, len * sizeof(float));
memset(G.HostData, 0, len * sizeof(float));
cudaMemset((void *)G.DevcData, 0, len * sizeof(float));
return G;
}
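// Example of the padding above: a request for len = 100000 floats is rounded
// up to ((100000 + 127) / 128) * 128 = 100096 elements, i.e. 400384 bytes on
// the device, so every allocation ends on a 512-byte boundary.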
//-----------------------------------------------------------------------------
// DestroyGpuFloat: destructor function for freeing memory in a gpuFloat
// instance.
//-----------------------------------------------------------------------------
void DestroyGpuFloat(gpuFloat *G)
{
if (G->IsPinned == 1) {
cudaFreeHost(G->HostData);
}
else {
free(G->HostData);
}
cudaFree(G->DevcData);
}
//-----------------------------------------------------------------------------
// UploadGpuFloat: upload a float array from the host to the device.
//-----------------------------------------------------------------------------
void UploadGpuFloat(gpuFloat *G)
{
cudaMemcpy(G->DevcData, G->HostData, G->len * sizeof(float),
cudaMemcpyHostToDevice);
}
//-----------------------------------------------------------------------------
// DownloadGpuFloat: download a float array from the device to the host.
//-----------------------------------------------------------------------------
void DownloadGpuFloat(gpuFloat *G)
{
  cudaMemcpy(G->HostData, G->DevcData, G->len * sizeof(float),
             cudaMemcpyDeviceToHost);
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
int i, np;
gpuInt particleTypes;
gpuFloat particleXcoord, particleYcoord, particleZcoord, particleCharge;
gpuFloat particleXfrc, particleYfrc, particleZfrc;
gpuFloat etot;
// Create a small array of particles and populate it
particleTypes = CreateGpuInt(100000, 1);
particleXcoord = CreateGpuFloat(100000, 1);
particleYcoord = CreateGpuFloat(100000, 1);
particleZcoord = CreateGpuFloat(100000, 1);
particleXfrc = CreateGpuFloat(100000, 1);
particleYfrc = CreateGpuFloat(100000, 1);
particleZfrc = CreateGpuFloat(100000, 1);
particleCharge = CreateGpuFloat(100000, 1);
// Allocate and initialize the total energy
// accumulator on the host and on the device.
etot = CreateGpuFloat(1, 1);
// Initialize random number generator. srand() SEEDS the generator,
// thereafter each call to rand() will return a different number.
  // This is a really bad generator (much better methods with longer
// periods before they start looping back over the same sequence are
// available).
srand(62052);
// Place many, many particles
np = 97913;
for (i = 0; i < np; i++) {
// Integer truncation would happen anyway, I'm just making it explicit
particleTypes.HostData[i] = (int)(8 * rand());
// Create some random coordinates (double-to-float conversion
// is happening here. On the GPU this can have performance
// impact, so keep an eye on the data types at all times!
particleXcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleYcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleZcoord.HostData[i] = 200.0 * (double)rand() / (double)RAND_MAX;
particleCharge.HostData[i] = 0.5 - rand() / (double)RAND_MAX;
}
// CHECK
#if 0
int j;
double qq = 0.0;
for (i = 0; i < np; i++) {
for (j = 0; j < i; j++) {
double dx = particleXcoord.HostData[i] - particleXcoord.HostData[j];
double dy = particleYcoord.HostData[i] - particleYcoord.HostData[j];
double dz = particleZcoord.HostData[i] - particleZcoord.HostData[j];
double qfac = particleCharge.HostData[i] * particleCharge.HostData[j];
qq += qfac / sqrt(dx*dx + dy*dy + dz*dz);
}
}
printf("CPU result = %9.4lf\n", qq);
#endif
// END CHECK
// Stage critical constants--see cribSheet struct instance cSh above.
cribSheet cnstage;
cnstage.nparticle = np;
cnstage.partX = particleXcoord.DevcData;
cnstage.partY = particleYcoord.DevcData;
cnstage.partZ = particleZcoord.DevcData;
cnstage.partFrcX = particleXfrc.DevcData;
cnstage.partFrcY = particleYfrc.DevcData;
cnstage.partFrcZ = particleZfrc.DevcData;
cnstage.partQ = particleCharge.DevcData;
cnstage.Etot = etot.DevcData;
// Upload all data to the device--note that forces are not getting
// uploaded, as the memory is already allocated. The forces will
// be initialized and computed on the device.
UploadGpuInt(&particleTypes);
UploadGpuFloat(&particleXcoord);
UploadGpuFloat(&particleYcoord);
UploadGpuFloat(&particleZcoord);
UploadGpuFloat(&particleCharge);
// Upload the constants to the constants cache
cudaMemcpyToSymbol(cSh, &cnstage, sizeof(cribSheet));
// Initialize energy and forces
etot.HostData[0] = 0.0;
UploadGpuFloat(&etot);
ParticleSimulator<<<1, 512>>>();
// Download the total energy
DownloadGpuFloat(&etot);
printf("Total energy (%4d threads) = %10.4f\n", 512, etot.HostData[0]);
// Device synchronization
cudaDeviceSynchronize();
return 0;
}
|
a6a481621ec4238b007fe1f2800158402ea84ea4.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#include <sstream>
#include "paddle/fluid/framework/fleet/fleet_wrapper.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
DECLARE_int32(gpugraph_storage_mode);
DECLARE_bool(graph_metapath_split_opt);
namespace paddle {
namespace framework {
#ifdef PADDLE_WITH_HETERPS
std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr);
void GraphGpuWrapper::set_device(std::vector<int> ids) {
for (auto device_id : ids) {
device_id_mapping.push_back(device_id);
}
}
void GraphGpuWrapper::init_conf(const std::string &first_node_type,
const std::string &meta_path,
const std::string &excluded_train_pair) {
static std::mutex mutex;
{
std::lock_guard<std::mutex> lock(mutex);
if (conf_initialized_) {
return;
}
VLOG(2) << "init path config";
conf_initialized_ = true;
auto node_types =
paddle::string::split_string<std::string>(first_node_type, ";");
VLOG(2) << "node_types: " << first_node_type;
for (auto &type : node_types) {
auto iter = node_to_id.find(type);
PADDLE_ENFORCE_NE(
iter,
node_to_id.end(),
platform::errors::NotFound("(%s) is not found in node_to_id.", type));
VLOG(2) << "node_to_id[" << type << "] = " << iter->second;
first_node_type_.push_back(iter->second);
}
meta_path_.resize(first_node_type_.size());
auto meta_paths = paddle::string::split_string<std::string>(meta_path, ";");
for (size_t i = 0; i < meta_paths.size(); i++) {
auto path = meta_paths[i];
auto edges = paddle::string::split_string<std::string>(path, "-");
for (auto &edge : edges) {
auto iter = edge_to_id.find(edge);
PADDLE_ENFORCE_NE(iter,
edge_to_id.end(),
platform::errors::NotFound(
"(%s) is not found in edge_to_id.", edge));
VLOG(2) << "edge_to_id[" << edge << "] = " << iter->second;
meta_path_[i].push_back(iter->second);
if (edge_to_node_map_.find(iter->second) == edge_to_node_map_.end()) {
auto nodes = get_ntype_from_etype(edge);
uint64_t src_node_id = node_to_id.find(nodes[0])->second;
uint64_t dst_node_id = node_to_id.find(nodes[1])->second;
edge_to_node_map_[iter->second] = src_node_id << 32 | dst_node_id;
}
}
}
auto paths =
paddle::string::split_string<std::string>(excluded_train_pair, ";");
VLOG(2) << "excluded_train_pair[" << excluded_train_pair << "]";
for (auto &path : paths) {
auto nodes = get_ntype_from_etype(path);
for (auto &node : nodes) {
auto iter = node_to_id.find(node);
PADDLE_ENFORCE_NE(iter,
                          node_to_id.end(),
                          platform::errors::NotFound(
                              "(%s) is not found in node_to_id.", node));
        VLOG(2) << "node_to_id[" << node << "] = " << iter->second;
excluded_train_pair_.push_back(iter->second);
}
}
int max_dev_id = 0;
for (size_t i = 0; i < device_id_mapping.size(); i++) {
if (device_id_mapping[i] > max_dev_id) {
max_dev_id = device_id_mapping[i];
}
}
finish_node_type_.resize(max_dev_id + 1);
node_type_start_.resize(max_dev_id + 1);
global_infer_node_type_start_.resize(max_dev_id + 1);
for (size_t i = 0; i < device_id_mapping.size(); i++) {
int dev_id = device_id_mapping[i];
auto &node_type_start = node_type_start_[i];
auto &infer_node_type_start = global_infer_node_type_start_[i];
auto &finish_node_type = finish_node_type_[i];
finish_node_type.clear();
for (size_t idx = 0; idx < node_to_id.size(); idx++) {
infer_node_type_start[idx] = 0;
}
for (auto &type : node_types) {
auto iter = node_to_id.find(type);
node_type_start[iter->second] = 0;
infer_node_type_start[iter->second] = 0;
}
infer_cursor_.push_back(0);
cursor_.push_back(0);
}
init_type_keys();
}
}
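// A hedged illustration of the strings init_conf expects (the type names here
// are hypothetical): first_node_type might be "user;item" (node types split by
// ';'), meta_path might be "user2item-item2user;item2user-user2item" (paths
// split by ';', edge types within a path split by '-'), and
// excluded_train_pair might be "user2user" (edge-type-style pairs split by ';').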
void GraphGpuWrapper::init_type_keys() {
size_t thread_num = device_id_mapping.size();
int cnt = 0;
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::vector<uint64_t>> tmp_keys;
tmp_keys.resize(thread_num);
int first_node_idx;
d_graph_all_type_total_keys_.resize(graph_all_type_total_keys.size());
h_graph_all_type_keys_len_.resize(graph_all_type_total_keys.size());
for (size_t f_idx = 0; f_idx < graph_all_type_total_keys.size(); f_idx++) {
for (size_t j = 0; j < tmp_keys.size(); j++) {
tmp_keys[j].clear();
}
d_graph_all_type_total_keys_[f_idx].resize(thread_num);
auto &type_total_key = graph_all_type_total_keys[f_idx];
for (size_t j = 0; j < type_total_key.size(); j++) {
uint64_t shard = type_total_key[j] % thread_num;
tmp_keys[shard].push_back(type_total_key[j]);
}
for (size_t j = 0; j < thread_num; j++) {
h_graph_all_type_keys_len_[f_idx].push_back(tmp_keys[j].size());
VLOG(1) << "node type: " << type_to_index[f_idx]
<< ", gpu_graph_device_keys[" << j
<< "] = " << tmp_keys[j].size();
}
for (size_t j = 0; j < thread_num; j++) {
auto stream = get_local_stream(j);
int gpuid = device_id_mapping[j];
auto place = platform::CUDAPlace(gpuid);
platform::CUDADeviceGuard guard(gpuid);
d_graph_all_type_total_keys_[f_idx][j] =
memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t));
hipMemcpyAsync(d_graph_all_type_total_keys_[f_idx][j]->ptr(),
tmp_keys[j].data(),
sizeof(uint64_t) * tmp_keys[j].size(),
hipMemcpyHostToDevice,
stream);
}
}
for (int i = 0; i < thread_num; i++) {
auto stream = get_local_stream(i);
hipStreamSynchronize(stream);
}
}
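// Note on init_type_keys: keys of each node type are sharded across devices by
// key % thread_num, each shard is copied to its GPU with hipMemcpyAsync on
// that device's local stream, and the final loop synchronizes every stream
// before the keys are consumed.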
void GraphGpuWrapper::init_metapath(std::string cur_metapath,
int cur_metapath_index,
int cur_metapath_len) {
cur_metapath_ = cur_metapath;
cur_metapath_index_ = cur_metapath_index;
cur_metapath_len_ = cur_metapath_len;
auto nodes = paddle::string::split_string<std::string>(cur_metapath_, "-");
cur_parse_metapath_.clear();
cur_parse_reverse_metapath_.clear();
for (auto &node : nodes) {
VLOG(2) << "node: " << node << " , in metapath: " << cur_metapath_;
auto iter = edge_to_id.find(node);
PADDLE_ENFORCE_NE(
iter,
edge_to_id.end(),
platform::errors::NotFound("(%s) is not found in edge_to_id.", node));
cur_parse_metapath_.push_back(iter->second);
std::string reverse_type = get_reverse_etype(node);
iter = edge_to_id.find(reverse_type);
PADDLE_ENFORCE_NE(iter,
edge_to_id.end(),
platform::errors::NotFound(
"(%s) is not found in edge_to_id.", reverse_type));
cur_parse_reverse_metapath_.push_back(iter->second);
}
size_t thread_num = device_id_mapping.size();
cur_metapath_start_.resize(thread_num);
for (size_t i = 0; i < thread_num; i++) {
cur_metapath_start_[i] = 0;
}
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::vector<uint64_t>> tmp_keys;
tmp_keys.resize(thread_num);
int first_node_idx;
std::string first_node = get_ntype_from_etype(nodes[0])[0];
auto it = node_to_id.find(first_node);
first_node_idx = it->second;
d_graph_train_total_keys_.resize(thread_num);
h_graph_train_keys_len_.resize(thread_num);
for (size_t j = 0; j < tmp_keys.size(); j++) {
tmp_keys[j].clear();
}
size_t f_idx = type_to_index[first_node_idx];
auto &type_total_key = graph_all_type_total_keys[f_idx];
VLOG(2) << "first node type:" << first_node_idx
<< ", node start size:" << type_total_key.size();
for (size_t j = 0; j < type_total_key.size(); j++) {
uint64_t shard = type_total_key[j] % thread_num;
tmp_keys[shard].push_back(type_total_key[j]);
}
auto fleet_ptr = framework::FleetWrapper::GetInstance();
std::shuffle(
tmp_keys.begin(), tmp_keys.end(), fleet_ptr->LocalRandomEngine());
for (size_t j = 0; j < thread_num; j++) {
h_graph_train_keys_len_[j] = tmp_keys[j].size();
VLOG(2) << j << " th card, graph train keys len: " << tmp_keys[j].size();
}
for (size_t j = 0; j < thread_num; j++) {
auto stream = get_local_stream(j);
int gpuid = device_id_mapping[j];
auto place = platform::CUDAPlace(gpuid);
platform::CUDADeviceGuard guard(gpuid);
d_graph_train_total_keys_[j] =
memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t));
hipMemcpyAsync(d_graph_train_total_keys_[j]->ptr(),
tmp_keys[j].data(),
sizeof(uint64_t) * tmp_keys[j].size(),
hipMemcpyHostToDevice,
stream);
}
}
void GraphGpuWrapper::clear_metapath_state() {
size_t thread_num = device_id_mapping.size();
for (size_t j = 0; j < thread_num; j++) {
cur_metapath_start_[j] = 0;
h_graph_train_keys_len_[j] = 0;
d_graph_train_total_keys_[j].reset();
for (size_t k = 0; k < cur_parse_metapath_.size(); k++) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->clear_graph_info(j, cur_parse_metapath_[k]);
}
}
std::vector<int> clear_etype;
for (size_t j = 0; j < cur_parse_metapath_.size(); j++) {
if (find(clear_etype.begin(), clear_etype.end(), cur_parse_metapath_[j]) ==
clear_etype.end()) {
clear_etype.push_back(cur_parse_metapath_[j]);
}
}
for (size_t j = 0; j < cur_parse_reverse_metapath_.size(); j++) {
if (find(clear_etype.begin(),
clear_etype.end(),
cur_parse_reverse_metapath_[j]) == clear_etype.end()) {
clear_etype.push_back(cur_parse_reverse_metapath_[j]);
}
}
for (size_t j = 0; j < clear_etype.size(); j++) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->clear_graph(clear_etype[j]);
}
}
int GraphGpuWrapper::get_all_id(int table_type,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, slice_num, output);
}
int GraphGpuWrapper::get_all_neighbor_id(
GraphTableType table_type,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_neighbor_id(table_type, slice_num, output);
}
int GraphGpuWrapper::get_all_id(int table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_all_neighbor_id(
GraphTableType table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_neighbor_id(
table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_all_feature_ids(
GraphTableType table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_feature_ids(
table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_node_embedding_ids(
int slice_num, std::vector<std::vector<uint64_t>> *output) {
return (reinterpret_cast<GpuPsGraphTable *>(graph_table))
->cpu_graph_table_->get_node_embedding_ids(slice_num, output);
}
std::string GraphGpuWrapper::get_reverse_etype(std::string etype) {
auto etype_split = paddle::string::split_string<std::string>(etype, "2");
if (etype_split.size() == 2) {
std::string reverse_type = etype_split[1] + "2" + etype_split[0];
return reverse_type;
} else if (etype_split.size() == 3) {
std::string reverse_type =
etype_split[2] + "2" + etype_split[1] + "2" + etype_split[0];
return reverse_type;
} else {
PADDLE_THROW(platform::errors::Fatal(
"The format of edge type should be [src2dst] or [src2etype2dst], "
"but got [%s].",
etype));
}
}
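// For example (hypothetical type names): "u2i" becomes "i2u", and the
// three-token form "u2buy2i" becomes "i2buy2u".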
std::vector<std::string> GraphGpuWrapper::get_ntype_from_etype(
std::string etype) {
std::vector<std::string> etype_split =
paddle::string::split_string<std::string>(etype, "2");
if (etype_split.size() == 2) {
return etype_split;
} else if (etype_split.size() == 3) {
auto iter = etype_split.erase(etype_split.begin() + 1);
return etype_split;
} else {
PADDLE_THROW(platform::errors::Fatal(
"The format of edge type should be [src2dst] or [src2etype2dst], "
"but got [%s].",
etype));
}
}
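// For example (hypothetical type names): "u2i" yields {"u", "i"}, and
// "u2buy2i" drops the middle token and also yields {"u", "i"}.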
void GraphGpuWrapper::set_up_types(const std::vector<std::string> &edge_types,
const std::vector<std::string> &node_types) {
id_to_edge = edge_types;
edge_to_id.clear();
for (size_t table_id = 0; table_id < edge_types.size(); table_id++) {
int res = edge_to_id.size();
edge_to_id[edge_types[table_id]] = res;
}
id_to_feature = node_types;
node_to_id.clear();
for (size_t table_id = 0; table_id < node_types.size(); table_id++) {
int res = node_to_id.size();
node_to_id[node_types[table_id]] = res;
}
table_feat_mapping.resize(node_types.size());
this->table_feat_conf_feat_name.resize(node_types.size());
this->table_feat_conf_feat_dtype.resize(node_types.size());
this->table_feat_conf_feat_shape.resize(node_types.size());
}
void GraphGpuWrapper::set_feature_separator(std::string ch) {
feature_separator_ = ch;
if (graph_table != nullptr) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_feature_separator(feature_separator_);
}
}
void GraphGpuWrapper::set_slot_feature_separator(std::string ch) {
slot_feature_separator_ = ch;
if (graph_table != nullptr) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_);
}
}
void GraphGpuWrapper::make_partitions(int idx,
int64_t byte_size,
int device_len) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->make_partitions(idx, byte_size, device_len);
}
int32_t GraphGpuWrapper::load_next_partition(int idx) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_next_partition(idx);
}
void GraphGpuWrapper::set_search_level(int level) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_search_level(level);
}
std::vector<uint64_t> GraphGpuWrapper::get_partition(int idx, int num) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_partition(idx, num);
}
int32_t GraphGpuWrapper::get_partition_num(int idx) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_partition_num(idx);
}
void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->make_complementary_graph(idx, byte_size);
}
void GraphGpuWrapper::load_edge_file(std::string name,
std::string filepath,
bool reverse) {
// 'e' means load edge
std::string params = "e";
if (reverse) {
// 'e<' means load edges from $2 to $1
params += "<" + name;
} else {
// 'e>' means load edges from $1 to $2
params += ">" + name;
}
if (edge_to_id.find(name) != edge_to_id.end()) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->Load(std::string(filepath), params);
}
}
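// Example of the resulting load command (hypothetical edge type "u2i"):
// reverse == false builds the params string "e>u2i", reverse == true builds
// "e<u2i", and the Load call is skipped if "u2i" was never registered in
// edge_to_id.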
void GraphGpuWrapper::load_edge_file(
std::string etype2files,
std::string graph_data_local_path,
int part_num,
bool reverse,
const std::vector<bool> &is_reverse_edge_map) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->parse_edge_and_load(etype2files,
graph_data_local_path,
part_num,
reverse,
is_reverse_edge_map);
}
int GraphGpuWrapper::load_node_file(std::string name, std::string filepath) {
// 'n' means load nodes and 'node_type' follows
std::string params = "n" + name;
if (node_to_id.find(name) != node_to_id.end()) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->Load(std::string(filepath), params);
}
return 0;
}
int GraphGpuWrapper::load_node_file(std::string ntype2files,
std::string graph_data_local_path,
int part_num) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->parse_node_and_load(
ntype2files, graph_data_local_path, part_num);
}
void GraphGpuWrapper::load_node_and_edge(
std::string etype2files,
std::string ntype2files,
std::string graph_data_local_path,
int part_num,
bool reverse,
const std::vector<bool> &is_reverse_edge_map) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_node_and_edge_file(etype2files,
ntype2files,
graph_data_local_path,
part_num,
reverse,
is_reverse_edge_map);
}
void GraphGpuWrapper::add_table_feat_conf(std::string table_name,
std::string feat_name,
std::string feat_dtype,
int feat_shape) {
if (node_to_id.find(table_name) != node_to_id.end()) {
int idx = node_to_id[table_name];
if (table_feat_mapping[idx].find(feat_name) ==
table_feat_mapping[idx].end()) {
int res = table_feat_mapping[idx].size();
table_feat_mapping[idx][feat_name] = res;
}
int feat_idx = table_feat_mapping[idx][feat_name];
VLOG(0) << "table_name " << table_name << " mapping id " << idx;
VLOG(0) << " feat name " << feat_name << " feat id" << feat_idx;
if (feat_idx < table_feat_conf_feat_name[idx].size()) {
      // override
table_feat_conf_feat_name[idx][feat_idx] = feat_name;
table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype;
table_feat_conf_feat_shape[idx][feat_idx] = feat_shape;
} else {
// new
table_feat_conf_feat_name[idx].push_back(feat_name);
table_feat_conf_feat_dtype[idx].push_back(feat_dtype);
table_feat_conf_feat_shape[idx].push_back(feat_shape);
}
}
VLOG(0) << "add conf over";
}
void GraphGpuWrapper::init_search_level(int level) { search_level = level; }
gpuStream_t GraphGpuWrapper::get_local_stream(int gpuid) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_local_stream(gpuid);
}
void GraphGpuWrapper::init_service() {
table_proto.set_task_pool_size(64);
table_proto.set_shard_num(1000);
table_proto.set_build_sampler_on_cpu(false);
table_proto.set_search_level(search_level);
table_proto.set_table_name("cpu_graph_table_");
table_proto.set_use_cache(false);
for (int i = 0; i < id_to_edge.size(); i++)
table_proto.add_edge_types(id_to_edge[i]);
for (int i = 0; i < id_to_feature.size(); i++) {
table_proto.add_node_types(id_to_feature[i]);
auto feat_node = id_to_feature[i];
::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature();
for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
g_f->add_name(table_feat_conf_feat_name[i][x]);
g_f->add_dtype(table_feat_conf_feat_dtype[i][x]);
g_f->add_shape(table_feat_conf_feat_shape[i][x]);
}
}
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(device_id_mapping);
resource->enable_p2p();
GpuPsGraphTable *g = new GpuPsGraphTable(resource, id_to_edge.size());
size_t gpu_num = device_id_mapping.size();
g->init_cpu_table(table_proto, gpu_num);
g->cpu_graph_table_->set_feature_separator(feature_separator_);
g->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_);
graph_table = reinterpret_cast<char *>(g);
upload_num = gpu_num;
upload_task_pool.reset(new ::ThreadPool(upload_num));
}
void GraphGpuWrapper::finalize() {
reinterpret_cast<GpuPsGraphTable *>(graph_table)->show_table_collisions();
}
// edge table
void GraphGpuWrapper::upload_batch(int table_type,
int slice_num,
const std::string &edge_type) {
VLOG(0) << "begin upload edge, etype[" << edge_type << "]";
auto iter = edge_to_id.find(edge_type);
int edge_idx = iter->second;
VLOG(2) << "cur edge: " << edge_type << ", edge_idx: " << edge_idx;
std::vector<std::vector<uint64_t>> ids;
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, edge_idx, slice_num, &ids);
debug_gpu_memory_info("upload_batch node start");
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
for (int i = 0; i < slice_num; i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, edge_idx, this]() -> int {
VLOG(0) << "begin make_gpu_ps_graph, node_id[" << i << "]_size["
<< ids[i].size() << "]";
GpuPsCommGraph sub_graph =
g->cpu_graph_table_->make_gpu_ps_graph(edge_idx, ids[i]);
g->build_graph_on_single_gpu(sub_graph, i, edge_idx);
sub_graph.release_on_cpu();
VLOG(1) << "sub graph on gpu " << i << " is built";
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
debug_gpu_memory_info("upload_batch node end");
}
// feature table
void GraphGpuWrapper::upload_batch(int table_type,
int slice_num,
int slot_num) {
if (table_type == GraphTableType::FEATURE_TABLE &&
(FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode::
MEM_EMB_FEATURE_AND_GPU_GRAPH ||
FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode::
SSD_EMB_AND_MEM_FEATURE_GPU_GRAPH)) {
return;
}
std::vector<std::vector<uint64_t>> node_ids;
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, slice_num, &node_ids);
debug_gpu_memory_info("upload_batch feature start");
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
for (int i = 0; i < slice_num; i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int {
VLOG(0) << "begin make_gpu_ps_graph_fea, node_ids[" << i << "]_size["
<< node_ids[i].size() << "]";
GpuPsCommGraphFea sub_graph =
g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num);
// sub_graph.display_on_cpu();
VLOG(0) << "begin build_graph_fea_on_single_gpu, node_ids[" << i
<< "]_size[" << node_ids[i].size() << "]";
g->build_graph_fea_on_single_gpu(sub_graph, i);
sub_graph.release_on_cpu();
VLOG(0) << "sub graph fea on gpu " << i << " is built";
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
// g->build_graph_from_cpu(vec);
debug_gpu_memory_info("upload_batch feature end");
}
// get sub_graph_fea
std::vector<GpuPsCommGraphFea> GraphGpuWrapper::get_sub_graph_fea(
std::vector<std::vector<uint64_t>> &node_ids, int slot_num) {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
std::vector<GpuPsCommGraphFea> sub_graph_feas(node_ids.size());
for (int i = 0; i < node_ids.size(); i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
sub_graph_feas[i] =
g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num);
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
return sub_graph_feas;
}
// build_gpu_graph_fea
void GraphGpuWrapper::build_gpu_graph_fea(GpuPsCommGraphFea &sub_graph_fea,
int i) {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
g->build_graph_fea_on_single_gpu(sub_graph_fea, i);
sub_graph_fea.release_on_cpu();
VLOG(1) << "sub graph fea on gpu " << i << " is built";
return;
}
NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3(
NeighborSampleQuery q, bool cpu_switch, bool compress = true) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_v3(q, cpu_switch, compress);
}
NeighborSampleResultV2 GraphGpuWrapper::graph_neighbor_sample_all_edge_type(
int gpu_id,
int edge_type_len,
uint64_t *key,
int sample_size,
int len,
std::vector<std::shared_ptr<phi::Allocation>> edge_type_graphs) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_all_edge_type(
gpu_id, edge_type_len, key, sample_size, len, edge_type_graphs);
}
std::vector<std::shared_ptr<phi::Allocation>>
GraphGpuWrapper::get_edge_type_graph(int gpu_id, int edge_type_len) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_edge_type_graph(gpu_id, edge_type_len);
}
void GraphGpuWrapper::get_node_degree(
int gpu_id,
int edge_idx,
uint64_t *key,
int len,
std::shared_ptr<phi::Allocation> node_degree) {
return (reinterpret_cast<GpuPsGraphTable *>(graph_table))
->get_node_degree(gpu_id, edge_idx, key, len, node_degree);
}
int GraphGpuWrapper::get_feature_info_of_nodes(
int gpu_id,
uint64_t *d_nodes,
int node_num,
uint32_t *size_list,
uint32_t *size_list_prefix_sum,
std::shared_ptr<phi::Allocation> &feature_list,
std::shared_ptr<phi::Allocation> &slot_list) {
platform::CUDADeviceGuard guard(gpu_id);
PADDLE_ENFORCE_NOT_NULL(graph_table,
paddle::platform::errors::InvalidArgument(
"graph_table should not be null"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_feature_info_of_nodes(gpu_id,
d_nodes,
node_num,
size_list,
size_list_prefix_sum,
feature_list,
slot_list);
}
int GraphGpuWrapper::get_feature_of_nodes(int gpu_id,
uint64_t *d_walk,
uint64_t *d_offset,
uint32_t size,
int slot_num,
int *d_slot_feature_num_map,
int fea_num_per_node) {
platform::CUDADeviceGuard guard(gpu_id);
PADDLE_ENFORCE_NOT_NULL(graph_table,
paddle::platform::errors::InvalidArgument(
"graph_table should not be null"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_feature_of_nodes(gpu_id,
d_walk,
d_offset,
size,
slot_num,
d_slot_feature_num_map,
fea_num_per_node);
}
NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample(
int gpu_id, uint64_t *device_keys, int walk_degree, int len) {
platform::CUDADeviceGuard guard(gpu_id);
auto neighbor_sample_res =
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample(gpu_id, device_keys, walk_degree, len);
return neighbor_sample_res;
}
// this function is contributed by Liwb5
std::vector<uint64_t> GraphGpuWrapper::graph_neighbor_sample(
int gpu_id, int idx, std::vector<uint64_t> &key, int sample_size) {
std::vector<uint64_t> res;
if (key.size() == 0) {
return res;
}
uint64_t *cuda_key;
platform::CUDADeviceGuard guard(gpu_id);
hipMalloc(&cuda_key, key.size() * sizeof(uint64_t));
hipMemcpy(cuda_key,
key.data(),
key.size() * sizeof(uint64_t),
hipMemcpyHostToDevice);
VLOG(0) << "key_size: " << key.size();
auto neighbor_sample_res =
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_v2(
gpu_id, idx, cuda_key, sample_size, key.size(), false, true);
int *actual_sample_size = new int[key.size()];
hipMemcpy(actual_sample_size,
neighbor_sample_res.actual_sample_size,
key.size() * sizeof(int),
hipMemcpyDeviceToHost); // 3, 1, 3
int cumsum = 0;
for (int i = 0; i < key.size(); i++) {
cumsum += actual_sample_size[i];
}
std::vector<uint64_t> cpu_key;
cpu_key.resize(key.size() * sample_size);
hipMemcpy(cpu_key.data(),
neighbor_sample_res.val,
key.size() * sample_size * sizeof(uint64_t),
hipMemcpyDeviceToHost);
for (int i = 0; i < key.size(); i++) {
for (int j = 0; j < actual_sample_size[i]; j++) {
res.push_back(key[i]);
res.push_back(cpu_key[i * sample_size + j]);
}
}
delete[] actual_sample_size;
hipFree(cuda_key);
return res;
}
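// The vector returned above is a flattened list of (source key, neighbor)
// pairs: for each input key i, actual_sample_size[i] pairs are appended, so
// the result holds 2 * sum(actual_sample_size) entries and can be walked two
// elements at a time.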
NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id,
int idx,
int start,
int query_size) {
PADDLE_ENFORCE_EQ(FLAGS_gpugraph_load_node_list_into_hbm,
true,
paddle::platform::errors::PreconditionNotMet(
"when use query_node_list should set "
"gpugraph_load_node_list_into_hbm true"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->query_node_list(gpu_id, idx, start, query_size);
}
void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_node_weight(type_id, idx, path);
}
std::vector<int> GraphGpuWrapper::slot_feature_num_map() const {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->slot_feature_num_map();
}
void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->export_partition_files(idx, file_path);
}
void GraphGpuWrapper::release_graph() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph();
}
void GraphGpuWrapper::release_graph_edge() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph_edge();
}
void GraphGpuWrapper::release_graph_node() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph_node();
}
std::vector<uint64_t> &GraphGpuWrapper::get_graph_total_keys() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->graph_total_keys_;
}
std::vector<std::vector<uint64_t>> &GraphGpuWrapper::get_graph_type_keys() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->graph_type_keys_;
}
std::unordered_map<int, int> &GraphGpuWrapper::get_graph_type_to_index() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->type_to_index_;
}
std::string &GraphGpuWrapper::get_node_type_size(std::string first_node_type) {
auto node_types =
paddle::string::split_string<std::string>(first_node_type, ";");
for (auto &type : node_types) {
uniq_first_node_.insert(type);
}
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::string> node_type_size;
for (auto node : uniq_first_node_) {
auto it = node_to_id.find(node);
auto first_node_idx = it->second;
size_t f_idx = type_to_index[first_node_idx];
int type_total_key_size = graph_all_type_total_keys[f_idx].size();
std::string node_type_str =
node + ":" + std::to_string(type_total_key_size);
node_type_size.push_back(node_type_str);
}
std::string delim = ";";
node_type_size_str_ = paddle::string::join_strings(node_type_size, delim);
return node_type_size_str_;
}
std::string &GraphGpuWrapper::get_edge_type_size() {
auto edge_type_size = reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->edge_type_size;
std::string delim = ";";
edge_type_size_str_ = paddle::string::join_strings(edge_type_size, delim);
return edge_type_size_str_;
}
#endif
} // namespace framework
}; // namespace paddle
| a6a481621ec4238b007fe1f2800158402ea84ea4.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_wrapper.h"
#include <sstream>
#include "paddle/fluid/framework/fleet/fleet_wrapper.h"
#include "paddle/fluid/framework/fleet/heter_ps/gpu_graph_utils.h"
#include "paddle/fluid/framework/fleet/heter_ps/graph_gpu_ps_table.h"
#include "paddle/fluid/framework/fleet/heter_ps/heter_resource.h"
DECLARE_int32(gpugraph_storage_mode);
DECLARE_bool(graph_metapath_split_opt);
namespace paddle {
namespace framework {
#ifdef PADDLE_WITH_HETERPS
std::shared_ptr<GraphGpuWrapper> GraphGpuWrapper::s_instance_(nullptr);
void GraphGpuWrapper::set_device(std::vector<int> ids) {
for (auto device_id : ids) {
device_id_mapping.push_back(device_id);
}
}
void GraphGpuWrapper::init_conf(const std::string &first_node_type,
const std::string &meta_path,
const std::string &excluded_train_pair) {
static std::mutex mutex;
{
std::lock_guard<std::mutex> lock(mutex);
if (conf_initialized_) {
return;
}
VLOG(2) << "init path config";
conf_initialized_ = true;
auto node_types =
paddle::string::split_string<std::string>(first_node_type, ";");
VLOG(2) << "node_types: " << first_node_type;
for (auto &type : node_types) {
auto iter = node_to_id.find(type);
PADDLE_ENFORCE_NE(
iter,
node_to_id.end(),
platform::errors::NotFound("(%s) is not found in node_to_id.", type));
VLOG(2) << "node_to_id[" << type << "] = " << iter->second;
first_node_type_.push_back(iter->second);
}
meta_path_.resize(first_node_type_.size());
auto meta_paths = paddle::string::split_string<std::string>(meta_path, ";");
for (size_t i = 0; i < meta_paths.size(); i++) {
auto path = meta_paths[i];
auto edges = paddle::string::split_string<std::string>(path, "-");
for (auto &edge : edges) {
auto iter = edge_to_id.find(edge);
PADDLE_ENFORCE_NE(iter,
edge_to_id.end(),
platform::errors::NotFound(
"(%s) is not found in edge_to_id.", edge));
VLOG(2) << "edge_to_id[" << edge << "] = " << iter->second;
meta_path_[i].push_back(iter->second);
if (edge_to_node_map_.find(iter->second) == edge_to_node_map_.end()) {
auto nodes = get_ntype_from_etype(edge);
uint64_t src_node_id = node_to_id.find(nodes[0])->second;
uint64_t dst_node_id = node_to_id.find(nodes[1])->second;
edge_to_node_map_[iter->second] = src_node_id << 32 | dst_node_id;
}
}
}
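    // Example of the config strings parsed above (hypothetical type names):
    //   first_node_type = "user;item"
    //   meta_path       = "user2item-item2user;item2user-user2item"
    // i.e. node types are ';'-separated, and each meta path is a ';'-separated
    // list of '-'-separated edge types that must already exist in edge_to_id.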
auto paths =
paddle::string::split_string<std::string>(excluded_train_pair, ";");
VLOG(2) << "excluded_train_pair[" << excluded_train_pair << "]";
for (auto &path : paths) {
auto nodes = get_ntype_from_etype(path);
for (auto &node : nodes) {
auto iter = node_to_id.find(node);
      PADDLE_ENFORCE_NE(iter,
                        node_to_id.end(),
                        platform::errors::NotFound(
                            "(%s) is not found in node_to_id.", node));
      VLOG(2) << "node_to_id[" << node << "] = " << iter->second;
excluded_train_pair_.push_back(iter->second);
}
}
int max_dev_id = 0;
for (size_t i = 0; i < device_id_mapping.size(); i++) {
if (device_id_mapping[i] > max_dev_id) {
max_dev_id = device_id_mapping[i];
}
}
finish_node_type_.resize(max_dev_id + 1);
node_type_start_.resize(max_dev_id + 1);
global_infer_node_type_start_.resize(max_dev_id + 1);
for (size_t i = 0; i < device_id_mapping.size(); i++) {
int dev_id = device_id_mapping[i];
auto &node_type_start = node_type_start_[i];
auto &infer_node_type_start = global_infer_node_type_start_[i];
auto &finish_node_type = finish_node_type_[i];
finish_node_type.clear();
for (size_t idx = 0; idx < node_to_id.size(); idx++) {
infer_node_type_start[idx] = 0;
}
for (auto &type : node_types) {
auto iter = node_to_id.find(type);
node_type_start[iter->second] = 0;
infer_node_type_start[iter->second] = 0;
}
infer_cursor_.push_back(0);
cursor_.push_back(0);
}
init_type_keys();
}
}
void GraphGpuWrapper::init_type_keys() {
size_t thread_num = device_id_mapping.size();
int cnt = 0;
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::vector<uint64_t>> tmp_keys;
tmp_keys.resize(thread_num);
int first_node_idx;
d_graph_all_type_total_keys_.resize(graph_all_type_total_keys.size());
h_graph_all_type_keys_len_.resize(graph_all_type_total_keys.size());
for (size_t f_idx = 0; f_idx < graph_all_type_total_keys.size(); f_idx++) {
for (size_t j = 0; j < tmp_keys.size(); j++) {
tmp_keys[j].clear();
}
d_graph_all_type_total_keys_[f_idx].resize(thread_num);
auto &type_total_key = graph_all_type_total_keys[f_idx];
for (size_t j = 0; j < type_total_key.size(); j++) {
uint64_t shard = type_total_key[j] % thread_num;
tmp_keys[shard].push_back(type_total_key[j]);
}
for (size_t j = 0; j < thread_num; j++) {
h_graph_all_type_keys_len_[f_idx].push_back(tmp_keys[j].size());
VLOG(1) << "node type: " << type_to_index[f_idx]
<< ", gpu_graph_device_keys[" << j
<< "] = " << tmp_keys[j].size();
}
for (size_t j = 0; j < thread_num; j++) {
auto stream = get_local_stream(j);
int gpuid = device_id_mapping[j];
auto place = platform::CUDAPlace(gpuid);
platform::CUDADeviceGuard guard(gpuid);
d_graph_all_type_total_keys_[f_idx][j] =
memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t));
cudaMemcpyAsync(d_graph_all_type_total_keys_[f_idx][j]->ptr(),
tmp_keys[j].data(),
sizeof(uint64_t) * tmp_keys[j].size(),
cudaMemcpyHostToDevice,
stream);
}
}
for (int i = 0; i < thread_num; i++) {
auto stream = get_local_stream(i);
cudaStreamSynchronize(stream);
}
}
void GraphGpuWrapper::init_metapath(std::string cur_metapath,
int cur_metapath_index,
int cur_metapath_len) {
cur_metapath_ = cur_metapath;
cur_metapath_index_ = cur_metapath_index;
cur_metapath_len_ = cur_metapath_len;
auto nodes = paddle::string::split_string<std::string>(cur_metapath_, "-");
cur_parse_metapath_.clear();
cur_parse_reverse_metapath_.clear();
for (auto &node : nodes) {
VLOG(2) << "node: " << node << " , in metapath: " << cur_metapath_;
auto iter = edge_to_id.find(node);
PADDLE_ENFORCE_NE(
iter,
edge_to_id.end(),
platform::errors::NotFound("(%s) is not found in edge_to_id.", node));
cur_parse_metapath_.push_back(iter->second);
std::string reverse_type = get_reverse_etype(node);
iter = edge_to_id.find(reverse_type);
PADDLE_ENFORCE_NE(iter,
edge_to_id.end(),
platform::errors::NotFound(
"(%s) is not found in edge_to_id.", reverse_type));
cur_parse_reverse_metapath_.push_back(iter->second);
}
size_t thread_num = device_id_mapping.size();
cur_metapath_start_.resize(thread_num);
for (size_t i = 0; i < thread_num; i++) {
cur_metapath_start_[i] = 0;
}
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::vector<uint64_t>> tmp_keys;
tmp_keys.resize(thread_num);
int first_node_idx;
std::string first_node = get_ntype_from_etype(nodes[0])[0];
auto it = node_to_id.find(first_node);
first_node_idx = it->second;
d_graph_train_total_keys_.resize(thread_num);
h_graph_train_keys_len_.resize(thread_num);
for (size_t j = 0; j < tmp_keys.size(); j++) {
tmp_keys[j].clear();
}
size_t f_idx = type_to_index[first_node_idx];
auto &type_total_key = graph_all_type_total_keys[f_idx];
VLOG(2) << "first node type:" << first_node_idx
<< ", node start size:" << type_total_key.size();
for (size_t j = 0; j < type_total_key.size(); j++) {
uint64_t shard = type_total_key[j] % thread_num;
tmp_keys[shard].push_back(type_total_key[j]);
}
auto fleet_ptr = framework::FleetWrapper::GetInstance();
std::shuffle(
tmp_keys.begin(), tmp_keys.end(), fleet_ptr->LocalRandomEngine());
for (size_t j = 0; j < thread_num; j++) {
h_graph_train_keys_len_[j] = tmp_keys[j].size();
VLOG(2) << j << " th card, graph train keys len: " << tmp_keys[j].size();
}
for (size_t j = 0; j < thread_num; j++) {
auto stream = get_local_stream(j);
int gpuid = device_id_mapping[j];
auto place = platform::CUDAPlace(gpuid);
platform::CUDADeviceGuard guard(gpuid);
d_graph_train_total_keys_[j] =
memory::AllocShared(place, tmp_keys[j].size() * sizeof(uint64_t));
cudaMemcpyAsync(d_graph_train_total_keys_[j]->ptr(),
tmp_keys[j].data(),
sizeof(uint64_t) * tmp_keys[j].size(),
cudaMemcpyHostToDevice,
stream);
}
}
void GraphGpuWrapper::clear_metapath_state() {
size_t thread_num = device_id_mapping.size();
for (size_t j = 0; j < thread_num; j++) {
cur_metapath_start_[j] = 0;
h_graph_train_keys_len_[j] = 0;
d_graph_train_total_keys_[j].reset();
for (size_t k = 0; k < cur_parse_metapath_.size(); k++) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->clear_graph_info(j, cur_parse_metapath_[k]);
}
}
std::vector<int> clear_etype;
for (size_t j = 0; j < cur_parse_metapath_.size(); j++) {
if (find(clear_etype.begin(), clear_etype.end(), cur_parse_metapath_[j]) ==
clear_etype.end()) {
clear_etype.push_back(cur_parse_metapath_[j]);
}
}
for (size_t j = 0; j < cur_parse_reverse_metapath_.size(); j++) {
if (find(clear_etype.begin(),
clear_etype.end(),
cur_parse_reverse_metapath_[j]) == clear_etype.end()) {
clear_etype.push_back(cur_parse_reverse_metapath_[j]);
}
}
for (size_t j = 0; j < clear_etype.size(); j++) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->clear_graph(clear_etype[j]);
}
}
int GraphGpuWrapper::get_all_id(int table_type,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, slice_num, output);
}
int GraphGpuWrapper::get_all_neighbor_id(
GraphTableType table_type,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_neighbor_id(table_type, slice_num, output);
}
int GraphGpuWrapper::get_all_id(int table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_all_neighbor_id(
GraphTableType table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_neighbor_id(
table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_all_feature_ids(
GraphTableType table_type,
int idx,
int slice_num,
std::vector<std::vector<uint64_t>> *output) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_feature_ids(
table_type, idx, slice_num, output);
}
int GraphGpuWrapper::get_node_embedding_ids(
int slice_num, std::vector<std::vector<uint64_t>> *output) {
return (reinterpret_cast<GpuPsGraphTable *>(graph_table))
->cpu_graph_table_->get_node_embedding_ids(slice_num, output);
}
std::string GraphGpuWrapper::get_reverse_etype(std::string etype) {
auto etype_split = paddle::string::split_string<std::string>(etype, "2");
if (etype_split.size() == 2) {
std::string reverse_type = etype_split[1] + "2" + etype_split[0];
return reverse_type;
} else if (etype_split.size() == 3) {
std::string reverse_type =
etype_split[2] + "2" + etype_split[1] + "2" + etype_split[0];
return reverse_type;
} else {
PADDLE_THROW(platform::errors::Fatal(
"The format of edge type should be [src2dst] or [src2etype2dst], "
"but got [%s].",
etype));
}
}
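// Illustrative examples (hypothetical edge types): get_reverse_etype("user2item")
// returns "item2user" and get_reverse_etype("user2click2item") returns
// "item2click2user"; any other shape hits the PADDLE_THROW above.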
std::vector<std::string> GraphGpuWrapper::get_ntype_from_etype(
std::string etype) {
std::vector<std::string> etype_split =
paddle::string::split_string<std::string>(etype, "2");
if (etype_split.size() == 2) {
return etype_split;
} else if (etype_split.size() == 3) {
auto iter = etype_split.erase(etype_split.begin() + 1);
return etype_split;
} else {
PADDLE_THROW(platform::errors::Fatal(
"The format of edge type should be [src2dst] or [src2etype2dst], "
"but got [%s].",
etype));
}
}
void GraphGpuWrapper::set_up_types(const std::vector<std::string> &edge_types,
const std::vector<std::string> &node_types) {
id_to_edge = edge_types;
edge_to_id.clear();
for (size_t table_id = 0; table_id < edge_types.size(); table_id++) {
int res = edge_to_id.size();
edge_to_id[edge_types[table_id]] = res;
}
id_to_feature = node_types;
node_to_id.clear();
for (size_t table_id = 0; table_id < node_types.size(); table_id++) {
int res = node_to_id.size();
node_to_id[node_types[table_id]] = res;
}
table_feat_mapping.resize(node_types.size());
this->table_feat_conf_feat_name.resize(node_types.size());
this->table_feat_conf_feat_dtype.resize(node_types.size());
this->table_feat_conf_feat_shape.resize(node_types.size());
}
void GraphGpuWrapper::set_feature_separator(std::string ch) {
feature_separator_ = ch;
if (graph_table != nullptr) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_feature_separator(feature_separator_);
}
}
void GraphGpuWrapper::set_slot_feature_separator(std::string ch) {
slot_feature_separator_ = ch;
if (graph_table != nullptr) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_);
}
}
void GraphGpuWrapper::make_partitions(int idx,
int64_t byte_size,
int device_len) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->make_partitions(idx, byte_size, device_len);
}
int32_t GraphGpuWrapper::load_next_partition(int idx) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_next_partition(idx);
}
void GraphGpuWrapper::set_search_level(int level) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->set_search_level(level);
}
std::vector<uint64_t> GraphGpuWrapper::get_partition(int idx, int num) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_partition(idx, num);
}
int32_t GraphGpuWrapper::get_partition_num(int idx) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_partition_num(idx);
}
void GraphGpuWrapper::make_complementary_graph(int idx, int64_t byte_size) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->make_complementary_graph(idx, byte_size);
}
void GraphGpuWrapper::load_edge_file(std::string name,
std::string filepath,
bool reverse) {
// 'e' means load edge
std::string params = "e";
if (reverse) {
// 'e<' means load edges from $2 to $1
params += "<" + name;
} else {
// 'e>' means load edges from $1 to $2
params += ">" + name;
}
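  // For illustration (hypothetical edge type): name = "user2item" yields
  // params = "e>user2item" when reverse == false and "e<user2item" otherwise.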
if (edge_to_id.find(name) != edge_to_id.end()) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->Load(std::string(filepath), params);
}
}
void GraphGpuWrapper::load_edge_file(
std::string etype2files,
std::string graph_data_local_path,
int part_num,
bool reverse,
const std::vector<bool> &is_reverse_edge_map) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->parse_edge_and_load(etype2files,
graph_data_local_path,
part_num,
reverse,
is_reverse_edge_map);
}
int GraphGpuWrapper::load_node_file(std::string name, std::string filepath) {
// 'n' means load nodes and 'node_type' follows
std::string params = "n" + name;
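  // e.g. (hypothetical node type) name = "user" yields params = "nuser"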
if (node_to_id.find(name) != node_to_id.end()) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->Load(std::string(filepath), params);
}
return 0;
}
int GraphGpuWrapper::load_node_file(std::string ntype2files,
std::string graph_data_local_path,
int part_num) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->parse_node_and_load(
ntype2files, graph_data_local_path, part_num);
}
void GraphGpuWrapper::load_node_and_edge(
std::string etype2files,
std::string ntype2files,
std::string graph_data_local_path,
int part_num,
bool reverse,
const std::vector<bool> &is_reverse_edge_map) {
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_node_and_edge_file(etype2files,
ntype2files,
graph_data_local_path,
part_num,
reverse,
is_reverse_edge_map);
}
void GraphGpuWrapper::add_table_feat_conf(std::string table_name,
std::string feat_name,
std::string feat_dtype,
int feat_shape) {
if (node_to_id.find(table_name) != node_to_id.end()) {
int idx = node_to_id[table_name];
if (table_feat_mapping[idx].find(feat_name) ==
table_feat_mapping[idx].end()) {
int res = table_feat_mapping[idx].size();
table_feat_mapping[idx][feat_name] = res;
}
int feat_idx = table_feat_mapping[idx][feat_name];
VLOG(0) << "table_name " << table_name << " mapping id " << idx;
    VLOG(0) << " feat name " << feat_name << " feat id " << feat_idx;
if (feat_idx < table_feat_conf_feat_name[idx].size()) {
      // override
table_feat_conf_feat_name[idx][feat_idx] = feat_name;
table_feat_conf_feat_dtype[idx][feat_idx] = feat_dtype;
table_feat_conf_feat_shape[idx][feat_idx] = feat_shape;
} else {
// new
table_feat_conf_feat_name[idx].push_back(feat_name);
table_feat_conf_feat_dtype[idx].push_back(feat_dtype);
table_feat_conf_feat_shape[idx].push_back(feat_shape);
}
}
VLOG(0) << "add conf over";
}
void GraphGpuWrapper::init_search_level(int level) { search_level = level; }
gpuStream_t GraphGpuWrapper::get_local_stream(int gpuid) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_local_stream(gpuid);
}
void GraphGpuWrapper::init_service() {
table_proto.set_task_pool_size(64);
table_proto.set_shard_num(1000);
table_proto.set_build_sampler_on_cpu(false);
table_proto.set_search_level(search_level);
table_proto.set_table_name("cpu_graph_table_");
table_proto.set_use_cache(false);
for (int i = 0; i < id_to_edge.size(); i++)
table_proto.add_edge_types(id_to_edge[i]);
for (int i = 0; i < id_to_feature.size(); i++) {
table_proto.add_node_types(id_to_feature[i]);
auto feat_node = id_to_feature[i];
::paddle::distributed::GraphFeature *g_f = table_proto.add_graph_feature();
for (int x = 0; x < table_feat_conf_feat_name[i].size(); x++) {
g_f->add_name(table_feat_conf_feat_name[i][x]);
g_f->add_dtype(table_feat_conf_feat_dtype[i][x]);
g_f->add_shape(table_feat_conf_feat_shape[i][x]);
}
}
std::shared_ptr<HeterPsResource> resource =
std::make_shared<HeterPsResource>(device_id_mapping);
resource->enable_p2p();
GpuPsGraphTable *g = new GpuPsGraphTable(resource, id_to_edge.size());
size_t gpu_num = device_id_mapping.size();
g->init_cpu_table(table_proto, gpu_num);
g->cpu_graph_table_->set_feature_separator(feature_separator_);
g->cpu_graph_table_->set_slot_feature_separator(slot_feature_separator_);
graph_table = reinterpret_cast<char *>(g);
upload_num = gpu_num;
upload_task_pool.reset(new ::ThreadPool(upload_num));
}
void GraphGpuWrapper::finalize() {
reinterpret_cast<GpuPsGraphTable *>(graph_table)->show_table_collisions();
}
// edge table
void GraphGpuWrapper::upload_batch(int table_type,
int slice_num,
const std::string &edge_type) {
VLOG(0) << "begin upload edge, etype[" << edge_type << "]";
auto iter = edge_to_id.find(edge_type);
int edge_idx = iter->second;
VLOG(2) << "cur edge: " << edge_type << ", edge_idx: " << edge_idx;
std::vector<std::vector<uint64_t>> ids;
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, edge_idx, slice_num, &ids);
debug_gpu_memory_info("upload_batch node start");
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
for (int i = 0; i < slice_num; i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, edge_idx, this]() -> int {
VLOG(0) << "begin make_gpu_ps_graph, node_id[" << i << "]_size["
<< ids[i].size() << "]";
GpuPsCommGraph sub_graph =
g->cpu_graph_table_->make_gpu_ps_graph(edge_idx, ids[i]);
g->build_graph_on_single_gpu(sub_graph, i, edge_idx);
sub_graph.release_on_cpu();
VLOG(1) << "sub graph on gpu " << i << " is built";
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
debug_gpu_memory_info("upload_batch node end");
}
// feature table
void GraphGpuWrapper::upload_batch(int table_type,
int slice_num,
int slot_num) {
if (table_type == GraphTableType::FEATURE_TABLE &&
(FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode::
MEM_EMB_FEATURE_AND_GPU_GRAPH ||
FLAGS_gpugraph_storage_mode == paddle::framework::GpuGraphStorageMode::
SSD_EMB_AND_MEM_FEATURE_GPU_GRAPH)) {
return;
}
std::vector<std::vector<uint64_t>> node_ids;
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->get_all_id(
(GraphTableType)table_type, slice_num, &node_ids);
debug_gpu_memory_info("upload_batch feature start");
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
for (int i = 0; i < slice_num; i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int {
VLOG(0) << "begin make_gpu_ps_graph_fea, node_ids[" << i << "]_size["
<< node_ids[i].size() << "]";
GpuPsCommGraphFea sub_graph =
g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num);
// sub_graph.display_on_cpu();
VLOG(0) << "begin build_graph_fea_on_single_gpu, node_ids[" << i
<< "]_size[" << node_ids[i].size() << "]";
g->build_graph_fea_on_single_gpu(sub_graph, i);
sub_graph.release_on_cpu();
VLOG(0) << "sub graph fea on gpu " << i << " is built";
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
// g->build_graph_from_cpu(vec);
debug_gpu_memory_info("upload_batch feature end");
}
// get sub_graph_fea
std::vector<GpuPsCommGraphFea> GraphGpuWrapper::get_sub_graph_fea(
std::vector<std::vector<uint64_t>> &node_ids, int slot_num) {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
std::vector<std::future<int>> tasks;
std::vector<GpuPsCommGraphFea> sub_graph_feas(node_ids.size());
for (int i = 0; i < node_ids.size(); i++) {
tasks.push_back(upload_task_pool->enqueue([&, i, this]() -> int {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
sub_graph_feas[i] =
g->cpu_graph_table_->make_gpu_ps_graph_fea(i, node_ids[i], slot_num);
return 0;
}));
}
for (size_t i = 0; i < tasks.size(); i++) tasks[i].get();
return sub_graph_feas;
}
// build_gpu_graph_fea
void GraphGpuWrapper::build_gpu_graph_fea(GpuPsCommGraphFea &sub_graph_fea,
int i) {
GpuPsGraphTable *g = reinterpret_cast<GpuPsGraphTable *>(graph_table);
g->build_graph_fea_on_single_gpu(sub_graph_fea, i);
sub_graph_fea.release_on_cpu();
VLOG(1) << "sub graph fea on gpu " << i << " is built";
return;
}
NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample_v3(
NeighborSampleQuery q, bool cpu_switch, bool compress = true) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_v3(q, cpu_switch, compress);
}
NeighborSampleResultV2 GraphGpuWrapper::graph_neighbor_sample_all_edge_type(
int gpu_id,
int edge_type_len,
uint64_t *key,
int sample_size,
int len,
std::vector<std::shared_ptr<phi::Allocation>> edge_type_graphs) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_all_edge_type(
gpu_id, edge_type_len, key, sample_size, len, edge_type_graphs);
}
std::vector<std::shared_ptr<phi::Allocation>>
GraphGpuWrapper::get_edge_type_graph(int gpu_id, int edge_type_len) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_edge_type_graph(gpu_id, edge_type_len);
}
void GraphGpuWrapper::get_node_degree(
int gpu_id,
int edge_idx,
uint64_t *key,
int len,
std::shared_ptr<phi::Allocation> node_degree) {
return (reinterpret_cast<GpuPsGraphTable *>(graph_table))
->get_node_degree(gpu_id, edge_idx, key, len, node_degree);
}
int GraphGpuWrapper::get_feature_info_of_nodes(
int gpu_id,
uint64_t *d_nodes,
int node_num,
uint32_t *size_list,
uint32_t *size_list_prefix_sum,
std::shared_ptr<phi::Allocation> &feature_list,
std::shared_ptr<phi::Allocation> &slot_list) {
platform::CUDADeviceGuard guard(gpu_id);
PADDLE_ENFORCE_NOT_NULL(graph_table,
paddle::platform::errors::InvalidArgument(
"graph_table should not be null"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_feature_info_of_nodes(gpu_id,
d_nodes,
node_num,
size_list,
size_list_prefix_sum,
feature_list,
slot_list);
}
int GraphGpuWrapper::get_feature_of_nodes(int gpu_id,
uint64_t *d_walk,
uint64_t *d_offset,
uint32_t size,
int slot_num,
int *d_slot_feature_num_map,
int fea_num_per_node) {
platform::CUDADeviceGuard guard(gpu_id);
PADDLE_ENFORCE_NOT_NULL(graph_table,
paddle::platform::errors::InvalidArgument(
"graph_table should not be null"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->get_feature_of_nodes(gpu_id,
d_walk,
d_offset,
size,
slot_num,
d_slot_feature_num_map,
fea_num_per_node);
}
NeighborSampleResult GraphGpuWrapper::graph_neighbor_sample(
int gpu_id, uint64_t *device_keys, int walk_degree, int len) {
platform::CUDADeviceGuard guard(gpu_id);
auto neighbor_sample_res =
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample(gpu_id, device_keys, walk_degree, len);
return neighbor_sample_res;
}
// this function is contributed by Liwb5
std::vector<uint64_t> GraphGpuWrapper::graph_neighbor_sample(
int gpu_id, int idx, std::vector<uint64_t> &key, int sample_size) {
std::vector<uint64_t> res;
if (key.size() == 0) {
return res;
}
uint64_t *cuda_key;
platform::CUDADeviceGuard guard(gpu_id);
cudaMalloc(&cuda_key, key.size() * sizeof(uint64_t));
cudaMemcpy(cuda_key,
key.data(),
key.size() * sizeof(uint64_t),
cudaMemcpyHostToDevice);
VLOG(0) << "key_size: " << key.size();
auto neighbor_sample_res =
reinterpret_cast<GpuPsGraphTable *>(graph_table)
->graph_neighbor_sample_v2(
gpu_id, idx, cuda_key, sample_size, key.size(), false, true);
int *actual_sample_size = new int[key.size()];
cudaMemcpy(actual_sample_size,
neighbor_sample_res.actual_sample_size,
key.size() * sizeof(int),
cudaMemcpyDeviceToHost); // 3, 1, 3
int cumsum = 0;
for (int i = 0; i < key.size(); i++) {
cumsum += actual_sample_size[i];
}
std::vector<uint64_t> cpu_key;
cpu_key.resize(key.size() * sample_size);
cudaMemcpy(cpu_key.data(),
neighbor_sample_res.val,
key.size() * sample_size * sizeof(uint64_t),
cudaMemcpyDeviceToHost);
for (int i = 0; i < key.size(); i++) {
for (int j = 0; j < actual_sample_size[i]; j++) {
res.push_back(key[i]);
res.push_back(cpu_key[i * sample_size + j]);
}
}
delete[] actual_sample_size;
cudaFree(cuda_key);
return res;
}
NodeQueryResult GraphGpuWrapper::query_node_list(int gpu_id,
int idx,
int start,
int query_size) {
PADDLE_ENFORCE_EQ(FLAGS_gpugraph_load_node_list_into_hbm,
true,
                    paddle::platform::errors::PreconditionNotMet(
                        "gpugraph_load_node_list_into_hbm must be set to true "
                        "when using query_node_list"));
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->query_node_list(gpu_id, idx, start, query_size);
}
void GraphGpuWrapper::load_node_weight(int type_id, int idx, std::string path) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->load_node_weight(type_id, idx, path);
}
std::vector<int> GraphGpuWrapper::slot_feature_num_map() const {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->slot_feature_num_map();
}
void GraphGpuWrapper::export_partition_files(int idx, std::string file_path) {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->export_partition_files(idx, file_path);
}
void GraphGpuWrapper::release_graph() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph();
}
void GraphGpuWrapper::release_graph_edge() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph_edge();
}
void GraphGpuWrapper::release_graph_node() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->release_graph_node();
}
std::vector<uint64_t> &GraphGpuWrapper::get_graph_total_keys() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->graph_total_keys_;
}
std::vector<std::vector<uint64_t>> &GraphGpuWrapper::get_graph_type_keys() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->graph_type_keys_;
}
std::unordered_map<int, int> &GraphGpuWrapper::get_graph_type_to_index() {
return reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->type_to_index_;
}
std::string &GraphGpuWrapper::get_node_type_size(std::string first_node_type) {
auto node_types =
paddle::string::split_string<std::string>(first_node_type, ";");
for (auto &type : node_types) {
uniq_first_node_.insert(type);
}
auto &graph_all_type_total_keys = get_graph_type_keys();
auto &type_to_index = get_graph_type_to_index();
std::vector<std::string> node_type_size;
for (auto node : uniq_first_node_) {
auto it = node_to_id.find(node);
auto first_node_idx = it->second;
size_t f_idx = type_to_index[first_node_idx];
int type_total_key_size = graph_all_type_total_keys[f_idx].size();
std::string node_type_str =
node + ":" + std::to_string(type_total_key_size);
node_type_size.push_back(node_type_str);
}
std::string delim = ";";
node_type_size_str_ = paddle::string::join_strings(node_type_size, delim);
return node_type_size_str_;
}
std::string &GraphGpuWrapper::get_edge_type_size() {
auto edge_type_size = reinterpret_cast<GpuPsGraphTable *>(graph_table)
->cpu_graph_table_->edge_type_size;
std::string delim = ";";
edge_type_size_str_ = paddle::string::join_strings(edge_type_size, delim);
return edge_type_size_str_;
}
#endif
} // namespace framework
}; // namespace paddle
|
a0eff5ab9fff9a275efb300f58d82e8a611a892a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=2048 --gridDim=64
__global__ void foo(int *r) {
r[threadIdx.x + blockIdx.x * blockDim.x] = warpSize;
}
| a0eff5ab9fff9a275efb300f58d82e8a611a892a.cu | //pass
//--blockDim=2048 --gridDim=64
__global__ void foo(int *r) {
r[threadIdx.x + blockIdx.x * blockDim.x] = warpSize;
}
|
7df2974779ef3570ca57543fe354bbc95de62623.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask, const int mask_index){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
if (top_data[index] < top_temp[index])
{
top_data[index] = top_temp[index];
mask[index] = mask_index;
}
} | 7df2974779ef3570ca57543fe354bbc95de62623.cu | #include "includes.h"
__global__ void Max (const int n, const float *top_temp, float *top_data, float *mask, const int mask_index){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
if (top_data[index] < top_temp[index])
{
top_data[index] = top_temp[index];
mask[index] = mask_index;
}
} |
a7d3c31b143feb56ade5cbacaf9069f0cc11db15.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#include "util.h"
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
__global__ void
Kernel(const Node* __restrict__ d_graph_nodes,
const int* __restrict__ d_graph_edges,
char* __restrict__ d_graph_mask,
char* __restrict__ d_updatind_graph_mask,
const char *__restrict__ d_graph_visited,
int* __restrict__ d_cost,
const int no_of_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes && d_graph_mask[tid])
{
d_graph_mask[tid]=0;
const int num_edges = d_graph_nodes[tid].no_of_edges;
const int starting = d_graph_nodes[tid].starting;
for(int i=starting; i<(num_edges + starting); i++)
{
int id = d_graph_edges[i];
if(!d_graph_visited[id])
{
d_cost[id]=d_cost[tid]+1;
d_updatind_graph_mask[id]=1;
}
}
}
}
__global__ void
Kernel2(char* __restrict__ d_graph_mask,
char* __restrict__ d_updatind_graph_mask,
char* __restrict__ d_graph_visited,
char* __restrict__ d_over,
const int no_of_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes && d_updatind_graph_mask[tid])
{
d_graph_mask[tid]=1;
d_graph_visited[tid]=1;
*d_over=1;
d_updatind_graph_mask[tid]=0;
}
}
void run_bfs_cpu(int no_of_nodes, Node *h_graph_nodes, int edge_list_size,
int *h_graph_edges, char *h_graph_mask, char *h_updating_graph_mask,
char *h_graph_visited, int *h_cost_ref)
{
char stop;
do{
//if no thread changes this value then the loop stops
stop=0;
for(int tid = 0; tid < no_of_nodes; tid++ )
{
if (h_graph_mask[tid] == 1){
h_graph_mask[tid]=0;
for(int i=h_graph_nodes[tid].starting;
i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++){
int id = h_graph_edges[i]; // node id is connected with node tid
if(!h_graph_visited[id]){ // if node id has not been visited, enter the body below
h_cost_ref[id]=h_cost_ref[tid]+1;
h_updating_graph_mask[id]=1;
}
}
}
}
for(int tid=0; tid< no_of_nodes ; tid++ )
{
if (h_updating_graph_mask[tid] == 1){
h_graph_mask[tid]=1;
h_graph_visited[tid]=1;
stop=1;
h_updating_graph_mask[tid]=0;
}
}
}
while(stop);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
//Apply BFS on a Graph
void run_bfs_gpu(int no_of_nodes, Node *h_graph_nodes, int edge_list_size,
int *h_graph_edges, char *h_graph_mask, char *h_updating_graph_mask,
char *h_graph_visited, int *h_cost)
{
Node* d_graph_nodes;
hipMalloc((void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
hipMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ;
int* d_graph_edges;
hipMalloc((void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ;
char* d_graph_mask;
hipMalloc((void**) &d_graph_mask, sizeof(char)*no_of_nodes) ;
hipMemcpy(d_graph_mask, h_graph_mask, sizeof(char)*no_of_nodes, hipMemcpyHostToDevice) ;
char* d_updating_graph_mask;
hipMalloc((void**) &d_updating_graph_mask, sizeof(char)*no_of_nodes) ;
hipMemcpy(d_updating_graph_mask, h_updating_graph_mask, sizeof(char)*no_of_nodes, hipMemcpyHostToDevice) ;
char* d_graph_visited;
hipMalloc((void**) &d_graph_visited, sizeof(char)*no_of_nodes) ;
hipMemcpy(d_graph_visited, h_graph_visited, sizeof(char)*no_of_nodes, hipMemcpyHostToDevice) ;
int* d_cost;
hipMalloc((void**) &d_cost, sizeof(int)*no_of_nodes);
hipMemcpy(d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ;
char h_over;
char *d_over;
hipMalloc((void**) &d_over, sizeof(char));
// setup execution parameters
dim3 grid((no_of_nodes + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK);
dim3 threads(MAX_THREADS_PER_BLOCK);
long time = 0;
do {
h_over = 0;
hipMemcpy(d_over, &h_over, sizeof(char), hipMemcpyHostToDevice) ;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads) , 0, 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask,
d_graph_visited, d_cost, no_of_nodes);
hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads) , 0, 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
hipMemcpy(&h_over, d_over, sizeof(char), hipMemcpyDeviceToHost) ;
} while(h_over);
printf("Total kernel execution time : %f (us)\n", time * 1e-3f);
// copy result from device to host
hipMemcpy(h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ;
hipFree(d_graph_nodes);
hipFree(d_graph_edges);
hipFree(d_graph_mask);
hipFree(d_updating_graph_mask);
hipFree(d_graph_visited);
hipFree(d_cost);
hipFree(d_over);
}
//----------------------------------------------------------
//--cambine: main function
//--author: created by Jianbin Fang
//--date: 25/01/2011
//----------------------------------------------------------
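//--input file layout, as consumed by the fscanf calls in main() below:
//--  line 1                    : no_of_nodes
//--  next no_of_nodes lines    : "<starting edge index> <number of edges>"
//--  next line                 : source node id (overridden to 0 below)
//--  next line                 : edge_list_size
//--  next edge_list_size lines : "<destination node id> <cost>"
//----------------------------------------------------------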
int main(int argc, char * argv[])
{
int no_of_nodes;
int edge_list_size;
FILE *fp;
Node* h_graph_nodes;
char *h_graph_mask, *h_updating_graph_mask, *h_graph_visited;
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp){
printf("Error Reading graph file %s\n", input_f);
return 1;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
// allocate host memory
h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
h_graph_mask = (char*) malloc(sizeof(char)*no_of_nodes);
h_updating_graph_mask = (char*) malloc(sizeof(char)*no_of_nodes);
h_graph_visited = (char*) malloc(sizeof(char)*no_of_nodes);
int start, edgeno;
  // initialize the memory
for(int i = 0; i < no_of_nodes; i++){
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=0;
h_updating_graph_mask[i]=0;
h_graph_visited[i]=0;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as 1 in the mask
h_graph_mask[source]=1;
h_graph_visited[source]=1;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++){
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp) fclose(fp);
// allocate mem for the result on host side
int *h_cost = (int*) malloc(sizeof(int)*no_of_nodes);
int *h_cost_ref = (int*)malloc(sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++){
h_cost[i]=-1;
h_cost_ref[i] = -1;
}
h_cost[source]=0;
h_cost_ref[source]=0;
printf("run bfs (#nodes = %d) on device\n", no_of_nodes);
run_bfs_gpu(no_of_nodes,h_graph_nodes,edge_list_size,h_graph_edges,
h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost);
printf("run bfs (#nodes = %d) on host (cpu) \n", no_of_nodes);
  // initialize the memory again
for(int i = 0; i < no_of_nodes; i++){
h_graph_mask[i]=0;
h_updating_graph_mask[i]=0;
h_graph_visited[i]=0;
}
//set the source node as 1 in the mask
source=0;
h_graph_mask[source]=1;
h_graph_visited[source]=1;
run_bfs_cpu(no_of_nodes,h_graph_nodes,edge_list_size,h_graph_edges,
h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost_ref);
// verify
compare_results<int>(h_cost_ref, h_cost, no_of_nodes);
free(h_graph_nodes);
free(h_graph_mask);
free(h_updating_graph_mask);
free(h_graph_visited);
free(h_cost);
free(h_cost_ref);
return 0;
}
| a7d3c31b143feb56ade5cbacaf9069f0cc11db15.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <chrono>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 256
#include "util.h"
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
__global__ void
Kernel(const Node* __restrict__ d_graph_nodes,
const int* __restrict__ d_graph_edges,
char* __restrict__ d_graph_mask,
char* __restrict__ d_updatind_graph_mask,
const char *__restrict__ d_graph_visited,
int* __restrict__ d_cost,
const int no_of_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes && d_graph_mask[tid])
{
d_graph_mask[tid]=0;
const int num_edges = d_graph_nodes[tid].no_of_edges;
const int starting = d_graph_nodes[tid].starting;
for(int i=starting; i<(num_edges + starting); i++)
{
int id = d_graph_edges[i];
if(!d_graph_visited[id])
{
d_cost[id]=d_cost[tid]+1;
d_updatind_graph_mask[id]=1;
}
}
}
}
__global__ void
Kernel2(char* __restrict__ d_graph_mask,
char* __restrict__ d_updatind_graph_mask,
char* __restrict__ d_graph_visited,
char* __restrict__ d_over,
const int no_of_nodes)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if( tid<no_of_nodes && d_updatind_graph_mask[tid])
{
d_graph_mask[tid]=1;
d_graph_visited[tid]=1;
*d_over=1;
d_updatind_graph_mask[tid]=0;
}
}
void run_bfs_cpu(int no_of_nodes, Node *h_graph_nodes, int edge_list_size,
int *h_graph_edges, char *h_graph_mask, char *h_updating_graph_mask,
char *h_graph_visited, int *h_cost_ref)
{
char stop;
do{
//if no thread changes this value then the loop stops
stop=0;
for(int tid = 0; tid < no_of_nodes; tid++ )
{
if (h_graph_mask[tid] == 1){
h_graph_mask[tid]=0;
for(int i=h_graph_nodes[tid].starting;
i<(h_graph_nodes[tid].no_of_edges + h_graph_nodes[tid].starting); i++){
int id = h_graph_edges[i]; // node id is connected with node tid
if(!h_graph_visited[id]){ // if node id has not been visited, enter the body below
h_cost_ref[id]=h_cost_ref[tid]+1;
h_updating_graph_mask[id]=1;
}
}
}
}
for(int tid=0; tid< no_of_nodes ; tid++ )
{
if (h_updating_graph_mask[tid] == 1){
h_graph_mask[tid]=1;
h_graph_visited[tid]=1;
stop=1;
h_updating_graph_mask[tid]=0;
}
}
}
while(stop);
}
void Usage(int argc, char**argv){
fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
//Apply BFS on a Graph
void run_bfs_gpu(int no_of_nodes, Node *h_graph_nodes, int edge_list_size,
int *h_graph_edges, char *h_graph_mask, char *h_updating_graph_mask,
char *h_graph_visited, int *h_cost)
{
Node* d_graph_nodes;
cudaMalloc((void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ;
cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ;
int* d_graph_edges;
cudaMalloc((void**) &d_graph_edges, sizeof(int)*edge_list_size) ;
cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ;
char* d_graph_mask;
cudaMalloc((void**) &d_graph_mask, sizeof(char)*no_of_nodes) ;
cudaMemcpy(d_graph_mask, h_graph_mask, sizeof(char)*no_of_nodes, cudaMemcpyHostToDevice) ;
char* d_updating_graph_mask;
cudaMalloc((void**) &d_updating_graph_mask, sizeof(char)*no_of_nodes) ;
cudaMemcpy(d_updating_graph_mask, h_updating_graph_mask, sizeof(char)*no_of_nodes, cudaMemcpyHostToDevice) ;
char* d_graph_visited;
cudaMalloc((void**) &d_graph_visited, sizeof(char)*no_of_nodes) ;
cudaMemcpy(d_graph_visited, h_graph_visited, sizeof(char)*no_of_nodes, cudaMemcpyHostToDevice) ;
int* d_cost;
cudaMalloc((void**) &d_cost, sizeof(int)*no_of_nodes);
cudaMemcpy(d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ;
char h_over;
char *d_over;
cudaMalloc((void**) &d_over, sizeof(char));
// setup execution parameters
dim3 grid((no_of_nodes + MAX_THREADS_PER_BLOCK - 1) / MAX_THREADS_PER_BLOCK);
dim3 threads(MAX_THREADS_PER_BLOCK);
long time = 0;
do {
h_over = 0;
cudaMemcpy(d_over, &h_over, sizeof(char), cudaMemcpyHostToDevice) ;
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
Kernel<<< grid, threads >>>(d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask,
d_graph_visited, d_cost, no_of_nodes);
Kernel2<<< grid, threads >>>(d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
cudaMemcpy(&h_over, d_over, sizeof(char), cudaMemcpyDeviceToHost) ;
} while(h_over);
printf("Total kernel execution time : %f (us)\n", time * 1e-3f);
// copy result from device to host
cudaMemcpy(h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ;
cudaFree(d_graph_nodes);
cudaFree(d_graph_edges);
cudaFree(d_graph_mask);
cudaFree(d_updating_graph_mask);
cudaFree(d_graph_visited);
cudaFree(d_cost);
cudaFree(d_over);
}
//----------------------------------------------------------
//--cambine: main function
//--author: created by Jianbin Fang
//--date: 25/01/2011
//----------------------------------------------------------
int main(int argc, char * argv[])
{
int no_of_nodes;
int edge_list_size;
FILE *fp;
Node* h_graph_nodes;
char *h_graph_mask, *h_updating_graph_mask, *h_graph_visited;
char *input_f;
if(argc!=2){
Usage(argc, argv);
exit(0);
}
input_f = argv[1];
printf("Reading File\n");
//Read in Graph from a file
fp = fopen(input_f,"r");
if(!fp){
printf("Error Reading graph file %s\n", input_f);
return 1;
}
int source = 0;
fscanf(fp,"%d",&no_of_nodes);
// allocate host memory
h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes);
h_graph_mask = (char*) malloc(sizeof(char)*no_of_nodes);
h_updating_graph_mask = (char*) malloc(sizeof(char)*no_of_nodes);
h_graph_visited = (char*) malloc(sizeof(char)*no_of_nodes);
int start, edgeno;
  // initialize the memory
for(int i = 0; i < no_of_nodes; i++){
fscanf(fp,"%d %d",&start,&edgeno);
h_graph_nodes[i].starting = start;
h_graph_nodes[i].no_of_edges = edgeno;
h_graph_mask[i]=0;
h_updating_graph_mask[i]=0;
h_graph_visited[i]=0;
}
//read the source node from the file
fscanf(fp,"%d",&source);
source=0;
//set the source node as 1 in the mask
h_graph_mask[source]=1;
h_graph_visited[source]=1;
fscanf(fp,"%d",&edge_list_size);
int id,cost;
int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size);
for(int i=0; i < edge_list_size ; i++){
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
h_graph_edges[i] = id;
}
if(fp) fclose(fp);
// allocate mem for the result on host side
int *h_cost = (int*) malloc(sizeof(int)*no_of_nodes);
int *h_cost_ref = (int*)malloc(sizeof(int)*no_of_nodes);
for(int i=0;i<no_of_nodes;i++){
h_cost[i]=-1;
h_cost_ref[i] = -1;
}
h_cost[source]=0;
h_cost_ref[source]=0;
printf("run bfs (#nodes = %d) on device\n", no_of_nodes);
run_bfs_gpu(no_of_nodes,h_graph_nodes,edge_list_size,h_graph_edges,
h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost);
printf("run bfs (#nodes = %d) on host (cpu) \n", no_of_nodes);
  // initialize the memory again
for(int i = 0; i < no_of_nodes; i++){
h_graph_mask[i]=0;
h_updating_graph_mask[i]=0;
h_graph_visited[i]=0;
}
//set the source node as 1 in the mask
source=0;
h_graph_mask[source]=1;
h_graph_visited[source]=1;
run_bfs_cpu(no_of_nodes,h_graph_nodes,edge_list_size,h_graph_edges,
h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost_ref);
// verify
compare_results<int>(h_cost_ref, h_cost, no_of_nodes);
free(h_graph_nodes);
free(h_graph_mask);
free(h_updating_graph_mask);
free(h_graph_visited);
free(h_cost);
free(h_cost_ref);
return 0;
}
|
2c3e1ccf00ca07229e1523df93e7b4b16da9bf05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void big_add(int *a, int *b, int *c, unsigned int N){
// init thread id
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
// stride is for big arrays, i.e. bigger than threads we have
int stride = blockDim.x * gridDim.x;
// do the operations
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += stride;
}
} | 2c3e1ccf00ca07229e1523df93e7b4b16da9bf05.cu | #include "includes.h"
__global__ void big_add(int *a, int *b, int *c, unsigned int N){
// init thread id
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
// stride is for big arrays, i.e. bigger than threads we have
int stride = blockDim.x * gridDim.x;
// do the operations
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += stride;
}
} |
f5ff1f356b781f1bb23a40af629270f48a5ddc13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// using both threads and blocks
#include <stdlib.h>
#include <stdio.h>
#include "./random.h"
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x; //blockDim gets threads per block
// if (index < n) // avoid accessing beyond array
c[index] = a[index] + b[index];
}
int main(void) {
int *a, *b ,*c;
int *d_a, *d_b, *d_c ;
int size = N * sizeof(int);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
  // Launch add() kernel on GPU with one thread per element
  // (N is an exact multiple of THREADS_PER_BLOCK, so no remainder block is needed)
  hipLaunchKernelGGL(( add), dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// Cleanup
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| f5ff1f356b781f1bb23a40af629270f48a5ddc13.cu | // using both threads and blocks
#include <stdlib.h>
#include <stdio.h>
#include "./random.h"
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x; //blockDim gets threads per block
// if (index < n) // avoid accessing beyond array
c[index] = a[index] + b[index];
}
int main(void) {
int *a, *b ,*c;
int *d_a, *d_b, *d_c ;
int size = N * sizeof(int);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
  // Launch add() kernel on GPU with one thread per element
  // (N is an exact multiple of THREADS_PER_BLOCK, so no remainder block is needed)
  add<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
df5ce87826132264d47074edfe4c8aeb1a5e1f7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
hipError_t err; \
if ((err = hipGetLastError()) != hipSuccess) { \
printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
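/* arithmetic behind the "fits within 16K" note above:
 * NBRLIST_MAXLEN = 11 * 11 * 11 = 1331 offsets, and
 * 1331 * sizeof(int3) = 1331 * 12 = 15972 bytes, just under 16 KB */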
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
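/* the "4K of shared memory" figure above:
 * BIN_CACHE_MAXLEN bins * BIN_DEPTH atoms * 4 floats * 4 bytes
 * = 32 * 8 * 4 * 4 = 4096 bytes, i.e. BIN_SIZE = 8 * 4 = 32 floats per bin */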
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
#define SUB_REGION_SIZE 128 /* number of floats in lattice sub-region */
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
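/*
 * worked example of the addressing above (values illustrative, not from any
 * particular run): a region holds 8*8*8 = 512 lattice points (REGION_SIZE
 * floats) and each 8*8*2 = 128-thread block covers one SUB_REGION_SIZE slab of
 * two z-planes, so (blockIdx.x & 3) picks the slab and (blockIdx.x >> 2) the x
 * region index. With BIN_LENGTH = 4 and, say, cutoff = 12, c = ceil(12/4) = 3;
 * the host code below shifts the bin pointer by
 * ((c*binDim.y + c)*binDim.x + c) * BIN_DEPTH float4 entries -- the offset in
 * the comment above scaled by BIN_DEPTH, since each bin stores BIN_DEPTH atoms.
 */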
__global__ static void cuda_cutoff_potential_lattice(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *mySubRegionAddr;
__shared__ int3 myBinIndex;
//const int xRegionIndex = (blockIdx.x >> 2);
//const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*8 + threadIdx.y)*8 + threadIdx.x;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
mySubRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ blockIdx.y)*(gridDim.x>>2) + (blockIdx.x >> 2))*REGION_SIZE
+ (blockIdx.x&3)*SUB_REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * (blockIdx.x >> 2) + threadIdx.x) * h;
float y = (8 * blockIdx.y + threadIdx.y) * h;
float z = (8 * zRegionIndex + 2*(blockIdx.x&3) + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * (blockIdx.x >> 2) + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * blockIdx.y + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((k*binDim_y) + j)*binDim_x + i) * BIN_SIZE;
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int tidmask = tid & 15;
int binIndex = startoff + bincnt*8*BIN_SIZE;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
for (bincnt = 0; bincnt < numbins; bincnt++) {
int i;
float r2;
for (i = 0; i < BIN_DEPTH; i++) {
float ax = AtomBinCache[bincnt * BIN_SIZE + i*4];
float ay = AtomBinCache[bincnt * BIN_SIZE + i*4 + 1];
float az = AtomBinCache[bincnt * BIN_SIZE + i*4 + 2];
float aq = AtomBinCache[bincnt * BIN_SIZE + i*4 + 3];
if (0.f == aq) break; /* no more atoms in bin */
r2 = (ax - x) * (ax - x) + (ay - y) * (ay - y) + (az - z) * (az - z);
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
mySubRegionAddr[tid] = energy;
}
extern "C" int gpu_compute_cutoff_potential_lattice(
struct pb_TimerSet *timers,
Lattice *lattice, /* the lattice */
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
float *regionZeroAddr, *thisRegion;
float *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
// Caller has made the 'compute' timer active
/* pad lattice to be factor of 8 in each dimension */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (float *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = 4 * xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 8;
blockDim.z = 2;
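/* 8 x 8 x 2 = 128 threads per block: one thread per lattice point of a sub-region,
 * with four blocks (absorbed into gridDim.x) covering each 8^3 region */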
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
hipMalloc((void **) ®ionZeroCuda, lnall * sizeof(float));
CUERR;
hipMemset(regionZeroCuda, 0, lnall * sizeof(float));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
hipMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
hipMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
hipMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
hipMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
hipMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
hipLaunchKernelGGL(( cuda_cutoff_potential_lattice), dim3(gridDim), dim3(blockDim), 0, 0, binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
CUERR;
}
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
hipMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(float),
hipMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
hipFree(regionZeroCuda);
hipFree(binBaseCuda);
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
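/* note: >> 3 and & 7 below are divide and modulo by 8, splitting each lattice
 * index into a region index and an offset inside the 8-point region edge */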
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
lattice->lattice[index] = thisRegion[indexRegion];
}
}
}
/* handle extra atoms */
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
| df5ce87826132264d47074edfe4c8aeb1a5e1f7a.cu | /***************************************************************************
*cr
*cr (C) Copyright 2008-2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "atom.h"
#include "cutoff.h"
#include "parboil.h"
#ifdef __DEVICE_EMULATION__
#define DEBUG
/* define which grid block and which thread to examine */
#define BX 0
#define BY 0
#define TX 0
#define TY 0
#define TZ 0
#define EMU(code) do { \
if (blockIdx.x==BX && blockIdx.y==BY && \
threadIdx.x==TX && threadIdx.y==TY && threadIdx.z==TZ) { \
code; \
} \
} while (0)
#define INT(n) printf("%s = %d\n", #n, n)
#define FLOAT(f) printf("%s = %g\n", #f, (double)(f))
#define INT3(n) printf("%s = %d %d %d\n", #n, (n).x, (n).y, (n).z)
#define FLOAT4(f) printf("%s = %g %g %g %g\n", #f, (double)(f).x, \
(double)(f).y, (double)(f).z, (double)(f).w)
#else
#define EMU(code)
#define INT(n)
#define FLOAT(f)
#define INT3(n)
#define FLOAT4(f)
#endif
/* report error from CUDA */
#define CUERR \
do { \
cudaError_t err; \
if ((err = cudaGetLastError()) != cudaSuccess) { \
printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
return -1; \
} \
} while (0)
/*
* neighbor list:
* stored in constant memory as table of offsets
* flat index addressing is computed by kernel
*
* reserve enough memory for 11^3 stencil of grid cells
* this fits within 16K of memory
*/
#define NBRLIST_DIM 11
#define NBRLIST_MAXLEN (NBRLIST_DIM * NBRLIST_DIM * NBRLIST_DIM)
__constant__ int NbrListLen;
__constant__ int3 NbrList[NBRLIST_MAXLEN];
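/* quick check of the 16K figure above: NBRLIST_MAXLEN = 11^3 = 1331 entries,
 * and 1331 * sizeof(int3) = 1331 * 12 bytes is just under 16 KB */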
/*
* atom bins cached into shared memory for processing
*
* this reserves 4K of shared memory for 32 atom bins each containing 8 atoms,
* should permit scheduling of up to 3 thread blocks per SM
*/
#define BIN_DEPTH 8 /* max number of atoms per bin */
#define BIN_SIZE 32 /* size of bin in floats */
#define BIN_CACHE_MAXLEN 32 /* max number of atom bins to cache */
#define BIN_LENGTH 4.f /* spatial length in Angstroms */
#define BIN_INVLEN (1.f / BIN_LENGTH)
/* assuming density of 1 atom / 10 A^3, expectation is 6.4 atoms per bin
* so that bin fill should be 80% (for non-empty regions of space) */
#define REGION_SIZE 512 /* number of floats in lattice region */
#define SUB_REGION_SIZE 128 /* number of floats in lattice sub-region */
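/* the AtomBinCache declared in the kernel below holds BIN_CACHE_MAXLEN * BIN_DEPTH * 4
 * floats = 32 * 8 * 4 * 4 bytes = 4096 bytes, which is the 4K figure quoted above */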
/*
* potential lattice is decomposed into size 8^3 lattice point "regions"
*
* THIS IMPLEMENTATION: one thread per lattice point
* thread block size 128 gives 4 thread blocks per region
* kernel is invoked for each x-y plane of regions,
* where gridDim.x is 4*(x region dimension) so that blockIdx.x
* can absorb the z sub-region index in its 2 lowest order bits
*
* Regions are stored contiguously in memory in row-major order
*
* The bins have to not only cover the region, but they need to surround
* the outer edges so that region sides and corners can still use
* neighbor list stencil. The binZeroAddr is actually a shifted pointer into
* the bin array (binZeroAddr = binBaseAddr + (c*binDim_y + c)*binDim_x + c)
* where c = ceil(cutoff / binsize). This allows for negative offsets to
* be added to myBinIndex.
*
* The (0,0,0) spatial origin corresponds to lower left corner of both
* regionZeroAddr and binZeroAddr. The atom coordinates are translated
* during binning to enforce this assumption.
*/
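/* worked example of the addressing used below: with gridDim.x = 4 * xRegionDim,
 * (blockIdx.x >> 2) recovers the x region index and (blockIdx.x & 3) selects one of
 * the four 8x8x2 sub-regions (SUB_REGION_SIZE = 128 points) stacked along z inside
 * the 8x8x8 region (REGION_SIZE = 512 points) */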
__global__ static void cuda_cutoff_potential_lattice(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *mySubRegionAddr;
__shared__ int3 myBinIndex;
//const int xRegionIndex = (blockIdx.x >> 2);
//const int yRegionIndex = blockIdx.y;
/* thread id */
const int tid = (threadIdx.z*8 + threadIdx.y)*8 + threadIdx.x;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
mySubRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y
+ blockIdx.y)*(gridDim.x>>2) + (blockIdx.x >> 2))*REGION_SIZE
+ (blockIdx.x&3)*SUB_REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * (blockIdx.x >> 2) + threadIdx.x) * h;
float y = (8 * blockIdx.y + threadIdx.y) * h;
float z = (8 * zRegionIndex + 2*(blockIdx.x&3) + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * (blockIdx.x >> 2) + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * blockIdx.y + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr)
+ (((k*binDim_y) + j)*binDim_x + i) * BIN_SIZE;
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int tidmask = tid & 15;
int binIndex = startoff + bincnt*8*BIN_SIZE;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
for (bincnt = 0; bincnt < numbins; bincnt++) {
int i;
float r2;
for (i = 0; i < BIN_DEPTH; i++) {
float ax = AtomBinCache[bincnt * BIN_SIZE + i*4];
float ay = AtomBinCache[bincnt * BIN_SIZE + i*4 + 1];
float az = AtomBinCache[bincnt * BIN_SIZE + i*4 + 2];
float aq = AtomBinCache[bincnt * BIN_SIZE + i*4 + 3];
if (0.f == aq) break; /* no more atoms in bin */
r2 = (ax - x) * (ax - x) + (ay - y) * (ay - y) + (az - z) * (az - z);
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
mySubRegionAddr[tid] = energy;
}
extern "C" int gpu_compute_cutoff_potential_lattice(
struct pb_TimerSet *timers,
Lattice *lattice, /* the lattice */
float cutoff, /* cutoff distance */
Atoms *atoms, /* array of atoms */
int verbose /* print info/debug messages */
)
{
int nx = lattice->dim.nx;
int ny = lattice->dim.ny;
int nz = lattice->dim.nz;
float xlo = lattice->dim.lo.x;
float ylo = lattice->dim.lo.y;
float zlo = lattice->dim.lo.z;
float h = lattice->dim.h;
int natoms = atoms->size;
Atom *atom = atoms->atoms;
int3 nbrlist[NBRLIST_MAXLEN];
int nbrlistlen = 0;
int binHistoFull[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int binHistoCover[BIN_DEPTH+1] = { 0 }; /* clear every array element */
int num_excluded = 0;
int xRegionDim, yRegionDim, zRegionDim;
int xRegionIndex, yRegionIndex, zRegionIndex;
int xOffset, yOffset, zOffset;
int lnx, lny, lnz, lnall;
float *regionZeroAddr, *thisRegion;
float *regionZeroCuda;
int index, indexRegion;
int c;
int3 binDim;
int nbins;
float4 *binBaseAddr, *binZeroAddr;
float4 *binBaseCuda, *binZeroCuda;
int *bincntBaseAddr, *bincntZeroAddr;
Atoms *extra = NULL;
int i, j, k, n;
int sum, total;
float avgFillFull, avgFillCover;
const float cutoff2 = cutoff * cutoff;
const float inv_cutoff2 = 1.f / cutoff2;
dim3 gridDim, blockDim;
// Caller has made the 'compute' timer active
/* pad lattice so each dimension is a multiple of 8 */
xRegionDim = (int) ceilf(nx/8.f);
yRegionDim = (int) ceilf(ny/8.f);
zRegionDim = (int) ceilf(nz/8.f);
lnx = 8 * xRegionDim;
lny = 8 * yRegionDim;
lnz = 8 * zRegionDim;
lnall = lnx * lny * lnz;
/* will receive energies from CUDA */
regionZeroAddr = (float *) malloc(lnall * sizeof(float));
/* create bins */
c = (int) ceil(cutoff * BIN_INVLEN); /* count extra bins around lattice */
binDim.x = (int) ceil(lnx * h * BIN_INVLEN) + 2*c;
binDim.y = (int) ceil(lny * h * BIN_INVLEN) + 2*c;
binDim.z = (int) ceil(lnz * h * BIN_INVLEN) + 2*c;
nbins = binDim.x * binDim.y * binDim.z;
binBaseAddr = (float4 *) calloc(nbins * BIN_DEPTH, sizeof(float4));
binZeroAddr = binBaseAddr + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
bincntBaseAddr = (int *) calloc(nbins, sizeof(int));
bincntZeroAddr = bincntBaseAddr + (c * binDim.y + c) * binDim.x + c;
/* create neighbor list */
if (ceilf(BIN_LENGTH / (8*h)) == floorf(BIN_LENGTH / (8*h))) {
float s = sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 1 cell */
if (2*c + 1 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-1)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else if (8*h <= 2*BIN_LENGTH) {
float s = 2.f*sqrtf(3);
float r2 = (cutoff + s*BIN_LENGTH) * (cutoff + s*BIN_LENGTH);
int cnt = 0;
/* develop neighbor list around 3-cube of cells */
if (2*c + 3 > NBRLIST_DIM) {
fprintf(stderr, "must have cutoff <= %f\n",
(NBRLIST_DIM-3)/2 * BIN_LENGTH);
return -1;
}
for (k = -c; k <= c; k++) {
for (j = -c; j <= c; j++) {
for (i = -c; i <= c; i++) {
if ((i*i + j*j + k*k)*BIN_LENGTH*BIN_LENGTH >= r2) continue;
nbrlist[cnt].x = i;
nbrlist[cnt].y = j;
nbrlist[cnt].z = k;
cnt++;
}
}
}
nbrlistlen = cnt;
}
else {
fprintf(stderr, "must have h <= %f\n", 0.25 * BIN_LENGTH);
return -1;
}
/* perform geometric hashing of atoms into bins */
{
/* array of extra atoms, permit average of one extra per bin */
Atom *extra_atoms = (Atom *) calloc(nbins, sizeof(Atom));
int extra_len = 0;
for (n = 0; n < natoms; n++) {
float4 p;
p.x = atom[n].x - xlo;
p.y = atom[n].y - ylo;
p.z = atom[n].z - zlo;
p.w = atom[n].q;
i = (int) floorf(p.x * BIN_INVLEN);
j = (int) floorf(p.y * BIN_INVLEN);
k = (int) floorf(p.z * BIN_INVLEN);
if (i >= -c && i < binDim.x - c &&
j >= -c && j < binDim.y - c &&
k >= -c && k < binDim.z - c &&
atom[n].q != 0) {
int index = (k * binDim.y + j) * binDim.x + i;
float4 *bin = binZeroAddr + index * BIN_DEPTH;
int bindex = bincntZeroAddr[index];
if (bindex < BIN_DEPTH) {
/* copy atom into bin and increase counter for this bin */
bin[bindex] = p;
bincntZeroAddr[index]++;
}
else {
/* add index to array of extra atoms to be computed with CPU */
if (extra_len >= nbins) {
fprintf(stderr, "exceeded space for storing extra atoms\n");
return -1;
}
extra_atoms[extra_len] = atom[n];
extra_len++;
}
}
else {
/* excluded atoms are either outside bins or neutrally charged */
num_excluded++;
}
}
/* Save result */
extra = (Atoms *)malloc(sizeof(Atoms));
extra->atoms = extra_atoms;
extra->size = extra_len;
}
/* bin stats */
sum = total = 0;
for (n = 0; n < nbins; n++) {
binHistoFull[ bincntBaseAddr[n] ]++;
sum += bincntBaseAddr[n];
total += BIN_DEPTH;
}
avgFillFull = sum / (float) total;
sum = total = 0;
for (k = 0; k < binDim.z - 2*c; k++) {
for (j = 0; j < binDim.y - 2*c; j++) {
for (i = 0; i < binDim.x - 2*c; i++) {
int index = (k * binDim.y + j) * binDim.x + i;
binHistoCover[ bincntZeroAddr[index] ]++;
sum += bincntZeroAddr[index];
total += BIN_DEPTH;
}
}
}
avgFillCover = sum / (float) total;
if (verbose) {
/* report */
printf("number of atoms = %d\n", natoms);
printf("lattice spacing = %g\n", h);
printf("cutoff distance = %g\n", cutoff);
printf("\n");
printf("requested lattice dimensions = %d %d %d\n", nx, ny, nz);
printf("requested space dimensions = %g %g %g\n", nx*h, ny*h, nz*h);
printf("expanded lattice dimensions = %d %d %d\n", lnx, lny, lnz);
printf("expanded space dimensions = %g %g %g\n", lnx*h, lny*h, lnz*h);
printf("number of bytes for lattice data = %u\n", lnall*sizeof(float));
printf("\n");
printf("bin padding thickness = %d\n", c);
printf("bin cover dimensions = %d %d %d\n",
binDim.x - 2*c, binDim.y - 2*c, binDim.z - 2*c);
printf("bin full dimensions = %d %d %d\n", binDim.x, binDim.y, binDim.z);
printf("number of bins = %d\n", nbins);
printf("total number of atom slots = %d\n", nbins * BIN_DEPTH);
printf("%% overhead space = %g\n",
(natoms / (double) (nbins * BIN_DEPTH)) * 100);
printf("number of bytes for bin data = %u\n",
nbins * BIN_DEPTH * sizeof(float4));
printf("\n");
printf("bin histogram with padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoFull[n]);
sum += binHistoFull[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillFull * 100);
printf("\n");
printf("bin histogram excluding padding:\n");
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
printf(" number of bins with %d atoms: %d\n", n, binHistoCover[n]);
sum += binHistoCover[n];
}
printf(" total number of bins: %d\n", sum);
printf(" %% average fill: %g\n", avgFillCover * 100);
printf("\n");
printf("number of extra atoms = %d\n", extra->size);
printf("%% atoms that are extra = %g\n", (extra->size / (double) natoms) * 100);
printf("\n");
/* sanity check on bins */
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoFull[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram with edges: "
"sum + others = %d\n", sum);
sum = 0;
for (n = 0; n <= BIN_DEPTH; n++) {
sum += n * binHistoCover[n];
}
sum += extra->size + num_excluded;
printf("sanity check on bin histogram excluding edges: "
"sum + others = %d\n", sum);
printf("\n");
/* neighbor list */
printf("neighbor list length = %d\n", nbrlistlen);
printf("\n");
}
/* setup CUDA kernel parameters */
gridDim.x = 4 * xRegionDim;
gridDim.y = yRegionDim;
gridDim.z = 1;
blockDim.x = 8;
blockDim.y = 8;
blockDim.z = 2;
/* allocate and initialize memory on CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
if (verbose) {
printf("Allocating %.2fMB on CUDA device for potentials\n",
lnall * sizeof(float) / (double) (1024*1024));
}
cudaMalloc((void **) ®ionZeroCuda, lnall * sizeof(float));
CUERR;
cudaMemset(regionZeroCuda, 0, lnall * sizeof(float));
CUERR;
if (verbose) {
printf("Allocating %.2fMB on CUDA device for atom bins\n",
nbins * BIN_DEPTH * sizeof(float4) / (double) (1024*1024));
}
cudaMalloc((void **) &binBaseCuda, nbins * BIN_DEPTH * sizeof(float4));
CUERR;
cudaMemcpy(binBaseCuda, binBaseAddr, nbins * BIN_DEPTH * sizeof(float4),
cudaMemcpyHostToDevice);
CUERR;
binZeroCuda = binBaseCuda + ((c * binDim.y + c) * binDim.x + c) * BIN_DEPTH;
cudaMemcpyToSymbol(NbrListLen, &nbrlistlen, sizeof(int), 0);
CUERR;
cudaMemcpyToSymbol(NbrList, nbrlist, nbrlistlen * sizeof(int3), 0);
CUERR;
if (verbose)
printf("\n");
/* loop over z-dimension, invoke CUDA kernel for each x-y plane */
pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
printf("Invoking CUDA kernel on %d region planes...\n", zRegionDim);
for (zRegionIndex = 0; zRegionIndex < zRegionDim; zRegionIndex++) {
printf(" computing plane %d\r", zRegionIndex);
fflush(stdout);
cuda_cutoff_potential_lattice<<<gridDim, blockDim, 0>>>(binDim.x, binDim.y,
binZeroCuda, h, cutoff2, inv_cutoff2, regionZeroCuda, zRegionIndex);
CUERR;
}
printf("Finished CUDA kernel calls \n");
/* copy result regions from CUDA device */
pb_SwitchToTimer(timers, pb_TimerID_COPY);
cudaMemcpy(regionZeroAddr, regionZeroCuda, lnall * sizeof(float),
cudaMemcpyDeviceToHost);
CUERR;
/* free CUDA memory allocations */
cudaFree(regionZeroCuda);
cudaFree(binBaseCuda);
/* transpose regions back into lattice */
pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);
for (k = 0; k < nz; k++) {
zRegionIndex = (k >> 3);
zOffset = (k & 7);
for (j = 0; j < ny; j++) {
yRegionIndex = (j >> 3);
yOffset = (j & 7);
for (i = 0; i < nx; i++) {
xRegionIndex = (i >> 3);
xOffset = (i & 7);
thisRegion = regionZeroAddr
+ ((zRegionIndex * yRegionDim + yRegionIndex) * xRegionDim
+ xRegionIndex) * REGION_SIZE;
indexRegion = (zOffset * 8 + yOffset) * 8 + xOffset;
index = (k * ny + j) * nx + i;
lattice->lattice[index] = thisRegion[indexRegion];
}
}
}
/* handle extra atoms */
if (extra->size > 0) {
printf("computing extra atoms on CPU\n");
if (cpu_compute_cutoff_potential_lattice(lattice, cutoff, extra)) {
fprintf(stderr, "cpu_compute_cutoff_potential_lattice() failed "
"for extra atoms\n");
return -1;
}
printf("\n");
}
/* cleanup memory allocations */
free(regionZeroAddr);
free(binBaseAddr);
free(bincntBaseAddr);
free_atom(extra);
return 0;
}
|
3ca01ca0e2e5b781c2161be11a9a0290487848c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_detection.cu
* \brief MultiBoxDetection op
* \author Joshua Zhang
* \modified by ddlee
*/
#include "./multibox_detection-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOX_DETECTION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ void Clip(DType *value, const DType lower, const DType upper) {
if ((*value) < lower) *value = lower;
if ((*value) > upper) *value = upper;
}
template<typename DType>
__device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) {
DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0]));
DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1]));
DType i = w * h;
DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i;
(*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u);
}
template<typename DType>
__global__
__launch_bounds__(cuda::kMaxThreadsPerBlock)
void DetectionForwardKernel(DType *out, const DType *cls_prob,
const DType *loc_pred, const DType *anchors,
DType *temp_space, const int num_classes,
const int num_anchors, const float *threshold,
const bool clip, const float vx,
const float vy, const float vw,
const float vh, const float nms_threshold,
const bool force_suppress, const int *nms_topk) {
const int nbatch = blockIdx.x; // each block for each batch
int index = threadIdx.x;
__shared__ int valid_count;
out += nbatch * num_anchors * 6;
cls_prob += nbatch * num_anchors * num_classes;
loc_pred += nbatch * num_anchors * 4;
if (index == 0) {
valid_count = 0;
}
__syncthreads();
// apply prediction to anchors
for (int i = index; i < num_anchors; i += blockDim.x) {
for (int j = 1; j < num_classes; ++j) {
int id = 0;
DType score = cls_prob[j * num_anchors + i];
id = j;
if (id > 0 && score < threshold[j-1]) {
id = 0;
}
if (id > 0) {
// valid class
int pos = atomicAdd(&valid_count, 1);
out[pos * 6] = id - 1; // restore original class id
out[pos * 6 + 1] = (id == 0 ? DType(-1) : score);
int offset = i * 4;
DType al = anchors[offset];
DType at = anchors[offset + 1];
DType ar = anchors[offset + 2];
DType ab = anchors[offset + 3];
DType aw = ar - al;
DType ah = ab - at;
DType ax = (al + ar) / 2.f;
DType ay = (at + ab) / 2.f;
DType ox = loc_pred[offset] * vx * aw + ax;
DType oy = loc_pred[offset + 1] * vy * ah + ay;
DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2;
DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2;
DType xmin = ox - ow;
DType ymin = oy - oh;
DType xmax = ox + ow;
DType ymax = oy + oh;
if (clip) {
Clip(&xmin, DType(0), DType(1));
Clip(&ymin, DType(0), DType(1));
Clip(&xmax, DType(0), DType(1));
Clip(&ymax, DType(0), DType(1));
}
out[pos * 6 + 2] = xmin;
out[pos * 6 + 3] = ymin;
out[pos * 6 + 4] = xmax;
out[pos * 6 + 5] = ymax;
}
}
}
__syncthreads();
if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return;
// if (index == 0) printf("%d\n", valid_count);
// descent sort according to scores
const int size = valid_count;
temp_space += nbatch * num_anchors * 6;
DType *src = out;
DType *dst = temp_space;
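// bottom-up merge sort of the detections by score (column 1), descending;
// src and dst ping-pong between the output buffer and temp_space, and each
// thread merges its share of adjacent runs of length width/2 per pass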
for (int width = 2; width < (size << 1); width <<= 1) {
int slices = (size - 1) / (blockDim.x * width) + 1;
int start = width * index * slices;
for (int slice = 0; slice < slices; ++slice) {
if (start >= size) break;
int middle = start + (width >> 1);
if (middle > size) middle = size;
int end = start + width;
if (end > size) end = size;
int i = start;
int j = middle;
for (int k = start; k < end; ++k) {
DType score_i = i < size ? src[i * 6 + 1] : DType(-1);
DType score_j = j < size ? src[j * 6 + 1] : DType(-1);
if (i < middle && (j >= end || score_i > score_j)) {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[i * 6 + n];
}
++i;
} else {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[j * 6 + n];
}
++j;
}
}
start += width;
}
__syncthreads();
src = src == out? temp_space : out;
dst = dst == out? temp_space : out;
}
__syncthreads();
if (src == temp_space) {
// copy from temp to out
for (int i = index; i < size * 6; i += blockDim.x) {
out[i] = temp_space[i];
}
__syncthreads();
}
// pre TOP_K cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
// keep top k detections for each cls
__shared__ int ntop;
__shared__ int cls_cnt[32];
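// note: cls_cnt has room for at most 32 foreground classes, so this path
// assumes num_classes - 1 <= 32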
// init cls_cnt with topk parameter
if (index == 0) {
ntop = size;
for (int j = 0; j < num_classes - 1; j++) {
cls_cnt[j] = nms_topk[j];
// printf("%d\n", cls_cnt[j]);
}
}
__syncthreads();
// use a separate counter for each cls, record last index with non-zero class_id as ntop
for (int i = index; i < size; i += blockDim.x) {
// use atomic function instead of ++
atomicSub(&cls_cnt[static_cast<int>(out[i*6])], 1);
if (cls_cnt[static_cast<int>(out[i*6])] < 0) {
out[i*6] = -1;
}
// find max cls_cnt, when all counters less than zero, set ntop
int temp = -size;
for (int j = 0; j < num_classes - 1; j++) {
if (cls_cnt[j] > temp) {
temp = cls_cnt[j];
}
}
if ( ntop == size && temp < 0) {
ntop = i;
}
}
__syncthreads();
// pre NMS cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
// if (index == 0) {
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\n", cls_cnt[j]);
// }
// printf("%d\n", ntop);
// }
// apply NMS
for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) {
DType compare_id = out[compare_pos * 6];
if (compare_id < 0) continue; // not a valid positive detection, skip
DType *compare_loc_ptr = out + compare_pos * 6 + 2;
for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) {
DType class_id = out[i * 6];
if (class_id < 0) continue;
if (force_suppress || (class_id == compare_id)) {
DType iou;
CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou);
if (iou >= nms_threshold) {
out[i * 6] = -1;
}
}
}
__syncthreads();
}
// post NMS cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
}
} // namespace cuda
template<typename DType>
inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out,
const Tensor<gpu, 3, DType> &cls_prob,
const Tensor<gpu, 2, DType> &loc_pred,
const Tensor<gpu, 2, DType> &anchors,
const Tensor<gpu, 3, DType> &temp_space,
const nnvm::Tuple<float> &threshold,
const bool clip,
const nnvm::Tuple<float> &variances,
const float nms_threshold,
const bool force_suppress,
const nnvm::Tuple<int> &nms_topk) {
CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4";
const int num_classes = cls_prob.size(1);
CHECK_EQ(nms_topk.ndim(), num_classes-1) << "nms_topk size must be num_classes - 1";
const int num_anchors = cls_prob.size(2);
const int num_batches = cls_prob.size(0);
const int num_threads = cuda::kMaxThreadsPerBlock;
int num_blocks = num_batches;
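// one thread block per image in the batch: the kernel indexes the batch with blockIdx.x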
// allocate device memory for the per-class nms_topk and threshold values and copy only
// the raw data (extracted from the nnvm::Tuple class); the kernel uses one entry per
// foreground class, i.e. num_classes - 1 entries, so copy exactly that many
int *nms_topk_ptr;
MULTIBOX_DETECTION_CUDA_CHECK(hipMalloc((void **)&nms_topk_ptr, sizeof(int) * (num_classes - 1)));
MULTIBOX_DETECTION_CUDA_CHECK(hipMemcpy(nms_topk_ptr, &nms_topk[0], sizeof(int) * (num_classes - 1), hipMemcpyHostToDevice));
float *score_thresh_ptr;
MULTIBOX_DETECTION_CUDA_CHECK(hipMalloc((void **)&score_thresh_ptr, sizeof(float) * (num_classes - 1)));
MULTIBOX_DETECTION_CUDA_CHECK(hipMemcpy(score_thresh_ptr, &threshold[0], sizeof(float) * (num_classes - 1), hipMemcpyHostToDevice));
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_,
cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_,
num_classes, num_anchors, score_thresh_ptr, clip,
variances[0], variances[1], variances[2], variances[3],
nms_threshold, force_suppress, nms_topk_ptr);
MULTIBOX_DETECTION_CUDA_CHECK(hipPeekAtLastError());
MULTIBOX_DETECTION_CUDA_CHECK(hipFree(nms_topk_ptr));
MULTIBOX_DETECTION_CUDA_CHECK(hipFree(score_thresh_ptr));
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxDetectionOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 3ca01ca0e2e5b781c2161be11a9a0290487848c5.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_detection.cu
* \brief MultiBoxDetection op
* \author Joshua Zhang
* \modified by ddlee
*/
#include "./multibox_detection-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOX_DETECTION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ void Clip(DType *value, const DType lower, const DType upper) {
if ((*value) < lower) *value = lower;
if ((*value) > upper) *value = upper;
}
template<typename DType>
__device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) {
DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0]));
DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1]));
DType i = w * h;
DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i;
(*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u);
}
template<typename DType>
__global__
__launch_bounds__(cuda::kMaxThreadsPerBlock)
void DetectionForwardKernel(DType *out, const DType *cls_prob,
const DType *loc_pred, const DType *anchors,
DType *temp_space, const int num_classes,
const int num_anchors, const float *threshold,
const bool clip, const float vx,
const float vy, const float vw,
const float vh, const float nms_threshold,
const bool force_suppress, const int *nms_topk) {
const int nbatch = blockIdx.x; // each block for each batch
int index = threadIdx.x;
__shared__ int valid_count;
out += nbatch * num_anchors * 6;
cls_prob += nbatch * num_anchors * num_classes;
loc_pred += nbatch * num_anchors * 4;
if (index == 0) {
valid_count = 0;
}
__syncthreads();
// apply prediction to anchors
for (int i = index; i < num_anchors; i += blockDim.x) {
for (int j = 1; j < num_classes; ++j) {
int id = 0;
DType score = cls_prob[j * num_anchors + i];
id = j;
if (id > 0 && score < threshold[j-1]) {
id = 0;
}
if (id > 0) {
// valid class
int pos = atomicAdd(&valid_count, 1);
out[pos * 6] = id - 1; // restore original class id
out[pos * 6 + 1] = (id == 0 ? DType(-1) : score);
int offset = i * 4;
DType al = anchors[offset];
DType at = anchors[offset + 1];
DType ar = anchors[offset + 2];
DType ab = anchors[offset + 3];
DType aw = ar - al;
DType ah = ab - at;
DType ax = (al + ar) / 2.f;
DType ay = (at + ab) / 2.f;
DType ox = loc_pred[offset] * vx * aw + ax;
DType oy = loc_pred[offset + 1] * vy * ah + ay;
DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2;
DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2;
DType xmin = ox - ow;
DType ymin = oy - oh;
DType xmax = ox + ow;
DType ymax = oy + oh;
if (clip) {
Clip(&xmin, DType(0), DType(1));
Clip(&ymin, DType(0), DType(1));
Clip(&xmax, DType(0), DType(1));
Clip(&ymax, DType(0), DType(1));
}
out[pos * 6 + 2] = xmin;
out[pos * 6 + 3] = ymin;
out[pos * 6 + 4] = xmax;
out[pos * 6 + 5] = ymax;
}
}
}
__syncthreads();
if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return;
// if (index == 0) printf("%d\n", valid_count);
// descent sort according to scores
const int size = valid_count;
temp_space += nbatch * num_anchors * 6;
DType *src = out;
DType *dst = temp_space;
for (int width = 2; width < (size << 1); width <<= 1) {
int slices = (size - 1) / (blockDim.x * width) + 1;
int start = width * index * slices;
for (int slice = 0; slice < slices; ++slice) {
if (start >= size) break;
int middle = start + (width >> 1);
if (middle > size) middle = size;
int end = start + width;
if (end > size) end = size;
int i = start;
int j = middle;
for (int k = start; k < end; ++k) {
DType score_i = i < size ? src[i * 6 + 1] : DType(-1);
DType score_j = j < size ? src[j * 6 + 1] : DType(-1);
if (i < middle && (j >= end || score_i > score_j)) {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[i * 6 + n];
}
++i;
} else {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[j * 6 + n];
}
++j;
}
}
start += width;
}
__syncthreads();
src = src == out? temp_space : out;
dst = dst == out? temp_space : out;
}
__syncthreads();
if (src == temp_space) {
// copy from temp to out
for (int i = index; i < size * 6; i += blockDim.x) {
out[i] = temp_space[i];
}
__syncthreads();
}
// pre TOP_K cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
// keep top k detections for each cls
__shared__ int ntop;
__shared__ int cls_cnt[32];
// init cls_cnt with topk parameter
if (index == 0) {
ntop = size;
for (int j = 0; j < num_classes - 1; j++) {
cls_cnt[j] = nms_topk[j];
// printf("%d\n", cls_cnt[j]);
}
}
__syncthreads();
// use a separate counter for each cls, record last index with non-zero class_id as ntop
for (int i = index; i < size; i += blockDim.x) {
// use atomic function instead of ++
atomicSub(&cls_cnt[static_cast<int>(out[i*6])], 1);
if (cls_cnt[static_cast<int>(out[i*6])] < 0) {
out[i*6] = -1;
}
// find max cls_cnt, when all counters less than zero, set ntop
int temp = -size;
for (int j = 0; j < num_classes - 1; j++) {
if (cls_cnt[j] > temp) {
temp = cls_cnt[j];
}
}
if ( ntop == size && temp < 0) {
ntop = i;
}
}
__syncthreads();
// pre NMS cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
// if (index == 0) {
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\n", cls_cnt[j]);
// }
// printf("%d\n", ntop);
// }
// apply NMS
for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) {
DType compare_id = out[compare_pos * 6];
if (compare_id < 0) continue; // not a valid positive detection, skip
DType *compare_loc_ptr = out + compare_pos * 6 + 2;
for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) {
DType class_id = out[i * 6];
if (class_id < 0) continue;
if (force_suppress || (class_id == compare_id)) {
DType iou;
CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou);
if (iou >= nms_threshold) {
out[i * 6] = -1;
}
}
}
__syncthreads();
}
// post NMS cls_cnt
// if (index == 0) {
// int cnt[32];
// for (int j = 0; j < num_classes - 1; j++) {
// cnt[j] = 0;
// //printf("%d\n", cls_cnt[j]);
// }
// for (int i = 0; i < size; i++) {
// if (static_cast<int>(out[i*6]) > -1) cnt[static_cast<int>(out[i*6])]++;
// }
// for (int j = 0; j < num_classes - 1; j++) {
// printf("%d\t", cnt[j]);
// }
// printf("\n");
// }
}
} // namespace cuda
template<typename DType>
inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out,
const Tensor<gpu, 3, DType> &cls_prob,
const Tensor<gpu, 2, DType> &loc_pred,
const Tensor<gpu, 2, DType> &anchors,
const Tensor<gpu, 3, DType> &temp_space,
const nnvm::Tuple<float> &threshold,
const bool clip,
const nnvm::Tuple<float> &variances,
const float nms_threshold,
const bool force_suppress,
const nnvm::Tuple<int> &nms_topk) {
CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4";
const int num_classes = cls_prob.size(1);
CHECK_EQ(nms_topk.ndim(), num_classes-1) << "nms_topk size must be num_classes - 1";
const int num_anchors = cls_prob.size(2);
const int num_batches = cls_prob.size(0);
const int num_threads = cuda::kMaxThreadsPerBlock;
int num_blocks = num_batches;
// allocate device memory for the per-class nms_topk and threshold values and copy only
// the raw data (extracted from the nnvm::Tuple class); the kernel uses one entry per
// foreground class, i.e. num_classes - 1 entries, so copy exactly that many
int *nms_topk_ptr;
MULTIBOX_DETECTION_CUDA_CHECK(cudaMalloc((void **)&nms_topk_ptr, sizeof(int) * (num_classes - 1)));
MULTIBOX_DETECTION_CUDA_CHECK(cudaMemcpy(nms_topk_ptr, &nms_topk[0], sizeof(int) * (num_classes - 1), cudaMemcpyHostToDevice));
float *score_thresh_ptr;
MULTIBOX_DETECTION_CUDA_CHECK(cudaMalloc((void **)&score_thresh_ptr, sizeof(float) * (num_classes - 1)));
MULTIBOX_DETECTION_CUDA_CHECK(cudaMemcpy(score_thresh_ptr, &threshold[0], sizeof(float) * (num_classes - 1), cudaMemcpyHostToDevice));
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_,
cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_,
num_classes, num_anchors, score_thresh_ptr, clip,
variances[0], variances[1], variances[2], variances[3],
nms_threshold, force_suppress, nms_topk_ptr);
MULTIBOX_DETECTION_CUDA_CHECK(cudaPeekAtLastError());
MULTIBOX_DETECTION_CUDA_CHECK(cudaFree(nms_topk_ptr));
MULTIBOX_DETECTION_CUDA_CHECK(cudaFree(score_thresh_ptr));
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxDetectionOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
2696b309ed1f66ea748e641f4454e096cc6ea7c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include "math.h"
using namespace std;
// uncomment to use the camera
//#define CAMERA
// Exercise 4
__global__ void getGradient( float *gradientx, float *gradienty, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
gradientx[ind] = resx;
gradienty[ind] = resy;
}
}
}
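// note: this "divergence" kernel applies forward differences to the input image itself
// and sums them; a divergence of the gradient field would instead take the two gradient
// channels as input and typically use backward differences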
__global__ void getDivergence(float *divergence, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
divergence[ind] = resx + resy;
}
}
}
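// note: despite the name, this kernel stores the gradient magnitude
// sqrt(dx^2 + dy^2) per pixel rather than the Laplacian div(grad u)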
__global__ void getLaplacian( float *Laplacian, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
Laplacian[ind] = sqrt(resx * resx + resy * resy);
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
if (nc == 1)
{
cv::Mat mOut(h,w,CV_32FC1);
}
else{cv::Mat mOut(h,w,CV_32FC3);}
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
//Exercise 4
int version = 1;
Timer timer; timer.start();
int len = w*h*nc;
float *gradientx = new float[(size_t)w*h*nc];
float *gradienty = new float[(size_t)w*h*nc];
float *divergence = new float[(size_t)w*h*nc];
float *Laplacian = new float[(size_t)w*h*nc];
float *d_imgIn = NULL;
float *d_gradientx = NULL;
float *d_gradienty = NULL;
float *d_divergence = NULL;
float *d_Laplacian = NULL;
size_t nbytes = (size_t)(len)*sizeof(float);
hipMalloc(&d_imgIn, nbytes); CUDA_CHECK;
hipMalloc(&d_gradientx, nbytes); CUDA_CHECK;
hipMalloc(&d_gradienty, nbytes); CUDA_CHECK;
hipMalloc(&d_divergence, nbytes); CUDA_CHECK;
hipMalloc(&d_Laplacian, nbytes); CUDA_CHECK;
hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice);
dim3 block = dim3(32, 16, 1); // the kernels loop over the channels internally, so one z-layer of threads is enough
dim3 grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
hipLaunchKernelGGL(( getGradient), dim3(grid), dim3(block), 0, 0, d_gradientx, d_gradienty, d_imgIn, w, h, nc);
hipLaunchKernelGGL(( getDivergence), dim3(grid), dim3(block), 0, 0, d_divergence, d_imgIn, w, h, nc);
hipLaunchKernelGGL(( getLaplacian), dim3(grid), dim3(block), 0, 0, d_Laplacian, d_imgIn, w, h, nc);
hipMemcpy(gradientx, d_gradientx, nbytes, hipMemcpyDeviceToHost);
hipMemcpy(gradienty, d_gradienty, nbytes, hipMemcpyDeviceToHost);
hipMemcpy(divergence, d_divergence, nbytes, hipMemcpyDeviceToHost);
hipMemcpy(Laplacian, d_Laplacian, nbytes, hipMemcpyDeviceToHost);
hipFree(d_imgIn);
hipFree(d_gradientx);
hipFree(d_gradienty);
hipFree(d_divergence);
hipFree(d_Laplacian);
timer.end();
float t = timer.get(); // elapsed time in seconds
cout << "time4: " << t*1000 << " ms" << endl;
//#############################################
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
cv::Mat Igradientx(h,w,mIn.type());
cv::Mat Igradienty(h,w,mIn.type());
cv::Mat ILaplacian(h,w,mIn.type());
convert_layered_to_mat(Igradientx, gradientx);
convert_layered_to_mat(Igradienty, gradienty);
//convert_layered_to_mat(divergence, divergence);
convert_layered_to_mat(ILaplacian, Laplacian);
showImage("gradientx", Igradientx, 100, 100);
showImage("gradienty", Igradienty, 100, 100);
showImage("Laplacian", ILaplacian, 100, 100);
//showImage("gradientx", gradientx, 100, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] gradientx;
delete[] gradienty;
delete[] divergence;
delete[] Laplacian;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 2696b309ed1f66ea748e641f4454e096cc6ea7c8.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include "math.h"
using namespace std;
// uncomment to use the camera
//#define CAMERA
// Exercise 4
__global__ void getGradient( float *gradientx, float *gradienty, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
gradientx[ind] = resx;
gradienty[ind] = resy;
}
}
}
__global__ void getDivergence(float *divergence, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
divergence[ind] = resx + resy;
}
}
}
__global__ void getLaplacian( float *Laplacian, float *img, float w, float h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return; // guard threads outside the image
for(int c = 0; c < nc; c++)
{
size_t ind = x + (size_t)y*w + (size_t)w*h*c;
size_t indxp = x+1 + (size_t)y*w + (size_t)w*h*c;
size_t indyp = x + (size_t)(y+1)*w + (size_t)w*h*c;
float resx = 0;
float resy = 0;
int n = w*h*nc;
if (x + 1 < w && y + 1 < h) // forward differences only where both neighbors exist
{
//gradient x
float xplus1 = img[indxp];
float x0 = img[ind];
resx = xplus1 - x0;
//gradient y
float yplus1 = img[indyp];
float y0 = img[ind];
resy = yplus1 - y0;
}
if(ind<n){
Laplacian[ind] = sqrt(resx * resx + resy * resy);
}
}
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
if (nc == 1)
{
cv::Mat mOut(h,w,CV_32FC1);
}
else{cv::Mat mOut(h,w,CV_32FC3);}
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
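// For a pixel (x,y) and channel c this means (illustrative index math, size_t casts omitted):
// interleaved cv::Mat element: (x + y*w)*nc + c -> r g b r g b ...
// layered float* element: x + y*w + w*h*c -> r..r g..g b..b (the indexing used in the kernels above)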
convert_mat_to_layered (imgIn, mIn);
//Exercise 4
int version = 1;
Timer timer; timer.start();
int len = w*h*nc;
float *gradientx = new float[(size_t)w*h*nc];
float *gradienty = new float[(size_t)w*h*nc];
float *divergence = new float[(size_t)w*h*nc];
float *Laplacian = new float[(size_t)w*h*nc];
float *d_imgIn = NULL;
float *d_gradientx = NULL;
float *d_gradienty = NULL;
float *d_divergence = NULL;
float *d_Laplacian = NULL;
size_t nbytes = (size_t)(len)*sizeof(float);
cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK;
cudaMalloc(&d_gradientx, nbytes); CUDA_CHECK;
cudaMalloc(&d_gradienty, nbytes); CUDA_CHECK;
cudaMalloc(&d_divergence, nbytes); CUDA_CHECK;
cudaMalloc(&d_Laplacian, nbytes); CUDA_CHECK;
cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice);
dim3 block = dim3(32,16,nc);
dim3 grid = dim3(len/block.x+1, 1, 1);
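// Note: this grid is 1-D in x while the kernels compute a 2-D (x,y) thread index, so blockIdx.y
// is always 0 and only the first blockDim.y rows receive distinct y values; a 2-D grid over (w,h),
// e.g. dim3((w+31)/32, (h+15)/16, 1) with block dim3(32,16,1), is probably what was intended.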
getGradient<<<grid, block>>>(d_gradientx, d_gradienty, d_imgIn, w, h, nc);
getDivergence<<<grid, block>>>(d_divergence, d_imgIn, w, h, nc);
getLaplacian<<<grid, block>>>(d_Laplacian, d_imgIn, w, h, nc);
cudaMemcpy(gradientx, d_gradientx, nbytes, cudaMemcpyDeviceToHost);
cudaMemcpy(gradienty, d_gradienty, nbytes, cudaMemcpyDeviceToHost);
cudaMemcpy(divergence, d_divergence, nbytes, cudaMemcpyDeviceToHost);
cudaMemcpy(Laplacian, d_Laplacian, nbytes, cudaMemcpyDeviceToHost);
cudaFree(d_imgIn);
cudaFree(d_gradientx);
cudaFree(d_gradienty);
cudaFree(d_divergence);
cudaFree(d_Laplacian);
timer.end();
float t = timer.get(); // elapsed time in seconds
cout << "time4: " << t*1000 << " ms" << endl;
//#############################################
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut, imgOut);
showImage("Output", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
cv::Mat Igradientx(h,w,mIn.type());
cv::Mat Igradienty(h,w,mIn.type());
cv::Mat ILaplacian(h,w,mIn.type());
convert_layered_to_mat(Igradientx, gradientx);
convert_layered_to_mat(Igradienty, gradienty);
//convert_layered_to_mat(divergence, divergence);
convert_layered_to_mat(ILaplacian, Laplacian);
showImage("gradientx", Igradientx, 100, 100);
showImage("gradienty", Igradienty, 100, 100);
showImage("Laplacian", ILaplacian, 100, 100);
//showImage("gradientx", gradientx, 100, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
delete[] imgIn;
delete[] imgOut;
delete[] gradientx;
delete[] gradienty;
delete[] divergence;
delete[] Laplacian;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
cc8ce1cb4bd2a9c30a7f8d84831d4856d7ee2ed6.hip | // !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
extern "C"
{
//kernel code
__global__ void kernel(/* parameters */)
{
}
}
| cc8ce1cb4bd2a9c30a7f8d84831d4856d7ee2ed6.cu | //Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
extern "C"
{
//kernel code
__global__ void kernel(/* parameters */)
{
}
}
|
d43ee0469a4b530a36b8ba8117d8a33a5d15a442.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/add_sparse_gradient.h"
#include "xdl/core/backend/device_singleton.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
template <typename T, typename I>
class SparseGradAddGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override {
std::vector<Tensor> in_grads, in_ids;
XDL_CHECK_STATUS(ctx->GetInputList("in_grads", &in_grads));
XDL_CHECK_STATUS(ctx->GetInputList("in_ids", &in_ids));
XDL_CHECK_COND(in_grads.size() == in_ids.size(),
Status::ArgumentError("grads and ids size not equal"));
for (size_t i = 0; i < in_grads.size(); ++i) {
XDL_CHECK_COND(in_grads[i].Shape().Size() == 2,
Status::ArgumentError("grad input dim must be 2"));
XDL_CHECK_COND(in_ids[i].Shape().Size() == 1 ||
in_ids[i].Shape().Size() == 2,
Status::ArgumentError("id input dim must be 1 or 2"));
XDL_CHECK_COND(in_grads[i].Shape()[0] == in_ids[i].Shape()[0],
Status::ArgumentError("grad dim 0 not equal to id dim 0"));
}
if (in_grads.empty()) return Status::Ok();
// copy data to host
std::vector<Tensor> host_grads, host_ids;
for (size_t i = 0; i < in_ids.size(); ++i) {
Tensor grad(DeviceSingleton::CpuInstance(), in_grads[i].Shape(),
in_grads[i].Type());
CUDA_CHECK(hipMemcpyAsync(grad.Raw<T>(), in_grads[i].Raw<T>(),
sizeof(T) * in_grads[i].Shape().NumElements(),
hipMemcpyDeviceToHost,
stream->GetInternal()));
host_grads.push_back(grad);
Tensor id(DeviceSingleton::CpuInstance(), in_ids[i].Shape(),
in_ids[i].Type());
CUDA_CHECK(hipMemcpyAsync(id.Raw<I>(), in_ids[i].Raw<I>(),
sizeof(I) * in_ids[i].Shape().NumElements(),
hipMemcpyDeviceToHost,
stream->GetInternal()));
host_ids.push_back(id);
}
CUDA_CHECK(hipStreamSynchronize(stream->GetInternal()));
// add sparse on host
Tensor host_out_grad, host_out_id;
HostAddSparse<T, I>(host_grads, host_ids, &host_out_grad, &host_out_id);
// copy host data to device
Tensor out_grad, out_id;
XDL_CHECK_STATUS(ctx->AllocateOutput(0, host_out_grad.Shape(), &out_grad));
XDL_CHECK_STATUS(ctx->AllocateOutput(1, host_out_id.Shape(), &out_id));
CUDA_CHECK(hipMemcpyAsync(out_grad.Raw<T>(),
host_out_grad.Raw<T>(),
sizeof(T) * host_out_grad.Shape().NumElements(),
hipMemcpyHostToDevice,
stream->GetInternal()));
CUDA_CHECK(hipMemcpyAsync(out_id.Raw<I>(),
host_out_id.Raw<I>(),
sizeof(I) * host_out_id.Shape().NumElements(),
hipMemcpyHostToDevice,
stream->GetInternal()));
return Status::Ok();
}
};
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(SparseGradAddOp, SparseGradAddGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype");
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| d43ee0469a4b530a36b8ba8117d8a33a5d15a442.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/add_sparse_gradient.h"
#include "xdl/core/backend/device_singleton.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
template <typename T, typename I>
class SparseGradAddGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override {
std::vector<Tensor> in_grads, in_ids;
XDL_CHECK_STATUS(ctx->GetInputList("in_grads", &in_grads));
XDL_CHECK_STATUS(ctx->GetInputList("in_ids", &in_ids));
XDL_CHECK_COND(in_grads.size() == in_ids.size(),
Status::ArgumentError("grads and ids size not equal"));
for (size_t i = 0; i < in_grads.size(); ++i) {
XDL_CHECK_COND(in_grads[i].Shape().Size() == 2,
Status::ArgumentError("grad input dim must be 2"));
XDL_CHECK_COND(in_ids[i].Shape().Size() == 1 ||
in_ids[i].Shape().Size() == 2,
Status::ArgumentError("id input dim must be 1 or 2"));
XDL_CHECK_COND(in_grads[i].Shape()[0] == in_ids[i].Shape()[0],
Status::ArgumentError("grad dim 0 not equal to id dim 0"));
}
if (in_grads.empty()) return Status::Ok();
// copy data to host
std::vector<Tensor> host_grads, host_ids;
for (size_t i = 0; i < in_ids.size(); ++i) {
Tensor grad(DeviceSingleton::CpuInstance(), in_grads[i].Shape(),
in_grads[i].Type());
CUDA_CHECK(cudaMemcpyAsync(grad.Raw<T>(), in_grads[i].Raw<T>(),
sizeof(T) * in_grads[i].Shape().NumElements(),
cudaMemcpyDeviceToHost,
stream->GetInternal()));
host_grads.push_back(grad);
Tensor id(DeviceSingleton::CpuInstance(), in_ids[i].Shape(),
in_ids[i].Type());
CUDA_CHECK(cudaMemcpyAsync(id.Raw<I>(), in_ids[i].Raw<I>(),
sizeof(I) * in_ids[i].Shape().NumElements(),
cudaMemcpyDeviceToHost,
stream->GetInternal()));
host_ids.push_back(id);
}
CUDA_CHECK(cudaStreamSynchronize(stream->GetInternal()));
// add sparse on host
Tensor host_out_grad, host_out_id;
HostAddSparse<T, I>(host_grads, host_ids, &host_out_grad, &host_out_id);
// copy host data to device
Tensor out_grad, out_id;
XDL_CHECK_STATUS(ctx->AllocateOutput(0, host_out_grad.Shape(), &out_grad));
XDL_CHECK_STATUS(ctx->AllocateOutput(1, host_out_id.Shape(), &out_id));
CUDA_CHECK(cudaMemcpyAsync(out_grad.Raw<T>(),
host_out_grad.Raw<T>(),
sizeof(T) * host_out_grad.Shape().NumElements(),
cudaMemcpyHostToDevice,
stream->GetInternal()));
CUDA_CHECK(cudaMemcpyAsync(out_id.Raw<I>(),
host_out_id.Raw<I>(),
sizeof(I) * host_out_id.Shape().NumElements(),
cudaMemcpyHostToDevice,
stream->GetInternal()));
return Status::Ok();
}
};
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(SparseGradAddOp, SparseGradAddGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype");
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
5486988d77d9187bf20d39535dd45321081316cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
bools[index] = idata[index] != 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
if (bools[index] == 1)
odata[indices[index]] = idata[index];
}
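// Typical use of these kernels for compaction (sketch; the scan step is implemented elsewhere):
// 1. kernMapToBoolean(n, bools, idata) so that bools[i] = (idata[i] != 0)
// 2. exclusive scan of bools into indices (indices[i] = bools[0] + ... + bools[i-1])
// 3. kernScatter(n, odata, idata, bools, indices); the surviving element count is
//    indices[n-1] + bools[n-1]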
}
}
| 5486988d77d9187bf20d39535dd45321081316cf.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
bools[index] = idata[index] != 0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
if (bools[index] == 1)
odata[indices[index]] = idata[index];
}
}
}
|
351e0280a1416d68f1a6944957f8257a2748faae.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/timer.cuh"
#include "../include/musket.cuh"
#include "../include/spfb16_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels))))
* // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
coeff.get_data_local((Index%(taps*channels)) + (j) * (channels))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
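// The two functors below realize one radix-2 butterfly stage per iteration of the host loop:
// Fetch_... reads the butterfly partner of element i (index i XOR 2^(log2size-1-counter)) into
// c_input_double, and Combine_... applies the twiddle factor derived from the bit-reversed stage
// index and combines each element with that fetched partner.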
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double fir_time=0.0, fft_time=0.0, allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0, out = 0.0;
timer.Start();
mkt::DArray<float> input(0, 201334784,201334784, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 268435456,268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 268435456,268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 512, 512, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 201334784; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 512; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
int ntaps = 32;
int nchans = 16;
int nspectra = 16777216;
int log2size = 4;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_output, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
for(int j = 0; ((j) < (log2size)); j++){
fetch_map_index_in_place_array_functor.counter = (j);fetch_map_index_in_place_array_functor.log2size = (log2size);
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
}
mkt::sync_streams();
timer.Stop();
fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
out += timer.Elapsed();
printf("\n%f;%f;%f;%f;%f;%f\n", fir_time, fft_time, allocation, fill, rest, out);
return EXIT_SUCCESS;
}
| 351e0280a1416d68f1a6944957f8257a2748faae.cu | #include <cuda.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <chrono>
#include <curand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/timer.cuh"
#include "../include/musket.cuh"
#include "../include/spfb16_0.cuh"
const double PI = 3.141592653589793;
//Float2::Float2() : x(), y() {}
struct FIR_map_index_in_place_array_functor{
FIR_map_index_in_place_array_functor(const mkt::DArray<float>& _input, const mkt::DArray<float>& _coeff) : input(_input), coeff(_coeff){}
~FIR_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 a){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
if(((Index) <= ((channels) * (spectra)))){
for(int j = 0; ((j) < (taps)); j++){
newa.x += (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
input.get_data_local(((Index) + ((j) * (channels))))
* // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
coeff.get_data_local((Index%(taps*channels)) + (j) * (channels))
);
}
}
return (newa);
}
void init(int device){
input.init(device);
coeff.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int taps;
int channels;
int spectra;
mkt::DeviceArray<float> input;
mkt::DeviceArray<float> coeff;
};
struct Fetch_map_index_in_place_array_functor{
Fetch_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_output) : c_output(_c_output){}
~Fetch_map_index_in_place_array_functor() {}
__device__
auto operator()(int i, float2 Ti){
return // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_output.get_data_local((i ^ (int) __powf(2, (((log2size) - 1) - (counter)))))
;
}
void init(int device){
c_output.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
mkt::DeviceArray<float2> c_output;
};
struct Combine_map_index_in_place_array_functor{
Combine_map_index_in_place_array_functor(const mkt::DArray<float2>& _c_input_double) : c_input_double(_c_input_double){}
~Combine_map_index_in_place_array_functor() {}
__device__
auto operator()(int Index, float2 Ai){
float2 newa;
newa.x = 0.0f;
newa.y = 0.0f;
int b = Index >> (log2size - counter - 1);
int b2 = 0;
for(int l = 0;l <= counter;l++) {
b2 = (b & 1) ? 2 * b2 + 1 : 2 * b2;
b >>= 1;
}
double temp = 2.0 * pi / Problemsize * (b2 << (log2size - counter - 1));
float2 intermediateresult;
intermediateresult.x = __cosf(temp);
intermediateresult.y = -__sinf(temp);
if(((Index) == __powf(2, (((log2size) - 1) - (counter))))){
float2 mult_res;
mult_res.x = (((intermediateresult).x * (Ai).x) - ((intermediateresult).y * (Ai).y));
mult_res.y = (((intermediateresult).x * (Ai).y) + ((intermediateresult).y * (Ai).x));
float2 add_res;
add_res.x = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
+ (mult_res).x);
add_res.y = (// TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
+ (mult_res).y);
newa = (add_res);
}
else {
float2 mult_res2;
mult_res2.x = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
) - ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
));
mult_res2.y = (((intermediateresult).x * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).y
) + ((intermediateresult).y * // TODO: ExpressionGenerator.generateCollectionElementRef: Array, global indices, distributed
c_input_double.get_data_local((Index)).x
));
float2 add_res2;
add_res2.x = ((Ai).x + (mult_res2).x);
add_res2.y = ((Ai).y + (mult_res2).y);
newa = (add_res2);
}
return (newa);
}
void init(int device){
c_input_double.init(device);
}
size_t get_smem_bytes(){
size_t result = 0;
return result;
}
int counter;
int log2size;
double pi;
int Problemsize;
mkt::DeviceArray<float2> c_input_double;
};
int main(int argc, char** argv) {
mkt::init();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
GpuTimer timer;
double fir_time=0.0, fft_time=0.0, allocation = 0.0,fill = 0.0, rest = 0.0, rest2 = 0.0, out = 0.0;
timer.Start();
mkt::DArray<float> input(0, 201334784,201334784, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_input_double(0, 268435456,268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float2> c_output(0, 268435456,268435456, float2{}, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<float> coeff(0, 512, 512, 0.0f, 1, 0, 0, mkt::DIST, mkt::COPY);
timer.Stop();
allocation += timer.Elapsed();
// timer.Start();
srand(1);
for (int n = 0; n < 201334784; n++) {
input[n] = (rand() / (float)RAND_MAX);
}
for (int n = 0; n < 512; n++) {
coeff[n] = (rand() / (float)RAND_MAX);
}
timer.Start();
input.update_devices();
coeff.update_devices();
timer.Stop();
fill += timer.Elapsed();
timer.Start();
FIR_map_index_in_place_array_functor fIR_map_index_in_place_array_functor{input, coeff};
Fetch_map_index_in_place_array_functor fetch_map_index_in_place_array_functor{c_output};
Combine_map_index_in_place_array_functor combine_map_index_in_place_array_functor{c_input_double};
timer.Stop();
rest += timer.Elapsed();
int ntaps = 32;
int nchans = 16;
int nspectra = 16777216;
int log2size = 4;
timer.Start();
fIR_map_index_in_place_array_functor.taps = (ntaps);fIR_map_index_in_place_array_functor.channels = (nchans);fIR_map_index_in_place_array_functor.spectra = (nspectra);
mkt::map_index_in_place<float2, FIR_map_index_in_place_array_functor>(c_output, fIR_map_index_in_place_array_functor);
timer.Stop();
fir_time += timer.Elapsed();
timer.Start();
for(int j = 0; ((j) < (log2size)); j++){
fetch_map_index_in_place_array_functor.counter = (j);fetch_map_index_in_place_array_functor.log2size = (log2size);
mkt::map_index_in_place<float2, Fetch_map_index_in_place_array_functor>(c_input_double, fetch_map_index_in_place_array_functor);
combine_map_index_in_place_array_functor.counter = (j);combine_map_index_in_place_array_functor.log2size = (log2size);combine_map_index_in_place_array_functor.pi = (PI);combine_map_index_in_place_array_functor.Problemsize = 16;
mkt::map_index_in_place<float2, Combine_map_index_in_place_array_functor>(c_output, combine_map_index_in_place_array_functor);
}
mkt::sync_streams();
timer.Stop();
fft_time += timer.Elapsed();
timer.Start();
c_output.update_self();
timer.Stop();
out += timer.Elapsed();
printf("\n%f;%f;%f;%f;%f;%f\n", fir_time, fft_time, allocation, fill, rest, out);
return EXIT_SUCCESS;
}
|
a655136b7242eff54ac3c32d7cff3b5d284aca69.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
__constant__ int D_BINS;
__constant__ int D_BIN_PIXEL_WIDTH;
__constant__ int D_BIN_PIXEL_HEIGHT;
__constant__ unsigned int D_COLOR_ONE;
__constant__ unsigned int D_COLOR_TWO;
__constant__ unsigned int D_COLOR_BACKGROUND;
__constant__ unsigned int D_OUT_OF_BOUNDS;
//kernel code
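// Expected launch shape, inferred from the indexing below (the host-side launch is not part of
// this file): one block per histogram bin (gridDim.x == D_BINS) with D_BIN_PIXEL_WIDTH threads
// per block, so blockIdx.x selects the bin and threadIdx.x the pixel column within that bin.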
__global__ void VisualizeHistogramKernel(
int *globalHist,
unsigned int* pixels
)
{
int globalThreadId = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int maxValue;
if(globalThreadId < D_BINS * D_BIN_PIXEL_WIDTH)
{
if(threadIdx.x == 0)
{
maxValue = 0;
//search maximum value for each histogram bins
for(int h = 0; h < D_BINS; h++)
{
if(globalHist[h] > maxValue)
{
maxValue = globalHist[h];
}
}
}
__syncthreads();
//get the height of the actual column
int columnHeightInv = D_BIN_PIXEL_HEIGHT - (int)((double)D_BIN_PIXEL_HEIGHT * ((double)globalHist[blockIdx.x] / (double)maxValue));
unsigned int histColor;
if(blockIdx.x == 0 || blockIdx.x == D_BINS - 1)
{
histColor = D_OUT_OF_BOUNDS;
}
else
{
histColor = (blockIdx.x % 2 == 0) * D_COLOR_ONE + (blockIdx.x % 2 == 1) * D_COLOR_TWO;
}
for(int i = 0; i < D_BIN_PIXEL_HEIGHT; i++)
{
if(i < columnHeightInv)
{
//background color
pixels[D_BINS * D_BIN_PIXEL_WIDTH * i + blockIdx.x*D_BIN_PIXEL_WIDTH + threadIdx.x] = D_COLOR_BACKGROUND;
}
else
{
//color of histogram
pixels[D_BINS * D_BIN_PIXEL_WIDTH * i + blockIdx.x*D_BIN_PIXEL_WIDTH + threadIdx.x] = histColor;
}
}
}
}
} | a655136b7242eff54ac3c32d7cff3b5d284aca69.cu | #include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
extern "C"
{
__constant__ int D_BINS;
__constant__ int D_BIN_PIXEL_WIDTH;
__constant__ int D_BIN_PIXEL_HEIGHT;
__constant__ unsigned int D_COLOR_ONE;
__constant__ unsigned int D_COLOR_TWO;
__constant__ unsigned int D_COLOR_BACKGROUND;
__constant__ unsigned int D_OUT_OF_BOUNDS;
//kernel code
__global__ void VisualizeHistogramKernel(
int *globalHist,
unsigned int* pixels
)
{
int globalThreadId = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ int maxValue;
if(globalThreadId < D_BINS * D_BIN_PIXEL_WIDTH)
{
if(threadIdx.x == 0)
{
maxValue = 0;
//search maximum value for each histogram bins
for(int h = 0; h < D_BINS; h++)
{
if(globalHist[h] > maxValue)
{
maxValue = globalHist[h];
}
}
}
__syncthreads();
//get the height of the actual column
int columnHeightInv = D_BIN_PIXEL_HEIGHT - (int)((double)D_BIN_PIXEL_HEIGHT * ((double)globalHist[blockIdx.x] / (double)maxValue));
unsigned int histColor;
if(blockIdx.x == 0 || blockIdx.x == D_BINS - 1)
{
histColor = D_OUT_OF_BOUNDS;
}
else
{
histColor = (blockIdx.x % 2 == 0) * D_COLOR_ONE + (blockIdx.x % 2 == 1) * D_COLOR_TWO;
}
for(int i = 0; i < D_BIN_PIXEL_HEIGHT; i++)
{
if(i < columnHeightInv)
{
//background color
pixels[D_BINS * D_BIN_PIXEL_WIDTH * i + blockIdx.x*D_BIN_PIXEL_WIDTH + threadIdx.x] = D_COLOR_BACKGROUND;
}
else
{
//color of histogram
pixels[D_BINS * D_BIN_PIXEL_WIDTH * i + blockIdx.x*D_BIN_PIXEL_WIDTH + threadIdx.x] = histColor;
}
}
}
}
} |
8a041ec6a0e7a15bbf108776a29f4fa3e9b15102.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.hpp"
#include "io_uncomp.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits = 15; // maximum bits in a code
constexpr int max_l_codes = 286; // maximum number of literal/length codes
constexpr int max_d_codes = 30; // maximum number of distance codes
constexpr int fix_l_codes = 288; // number of fixed literal/length codes
constexpr int log2_len_lut = 10;
constexpr int log2_dist_lut = 8;
/**
* @brief Intermediate arrays for building huffman tables
*/
struct scratch_arr {
int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths
int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch)
};
/**
* @brief Huffman LUTs for length and distance codes
*/
struct lut_arr {
int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding
int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2; // 1..5
constexpr int log2_batch_size = 5;
constexpr int batch_count = (1 << log2_batch_count);
constexpr int batch_size = (1 << log2_batch_size);
/**
* @brief Inter-warp communication queue
*/
struct xwarp_s {
int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[batch_count * batch_size];
uint8_t symqueue8[batch_count * batch_size * 4];
} u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3
constexpr int prefetch_size = (1 << log2_prefetch_size);
/// @brief Prefetcher state
struct prefetch_queue_s {
const uint8_t* cur_p; ///< Prefetch location
int run; ///< prefetcher will exit when run=0
uint8_t pref_data[prefetch_size];
};
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
* @brief Inflate decompressor state
*/
struct inflate_state_s {
// output state
uint8_t* out; ///< output buffer
uint8_t* outbase; ///< start of output buffer
uint8_t* outend; ///< end of output buffer
// Input state
uint8_t const* cur; ///< input buffer
uint8_t const* end; ///< end of input buffer
uint2 bitbuf; ///< bit buffer (64-bit)
uint32_t bitpos; ///< position in bit buffer
int32_t err; ///< Error status
int btype; ///< current block type
int blast; ///< last block
uint32_t stored_blk_len; ///< length of stored (uncompressed) block
uint16_t first_slow_len; ///< first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[max_bits + 1];
int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes
int16_t distcnt[max_bits + 1];
int16_t distsym[max_d_codes];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source,
unsigned int bit_start,
unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32) {
auto cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
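// Bit-buffer mechanics used by showbits/nextbits32/skipbits: bitbuf.x/bitbuf.y cache two
// consecutive 32-bit words of the input, __funnelshift_rc(x, y, bitpos) yields the 32 bits
// starting at bit offset bitpos, and once 32 bits have been consumed y is promoted to x and
// the following input word is loaded into y.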
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= max_bits; len++) {
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
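/* Worked example of the canonical-code convention described above (illustrative only):
 * for code lengths B=1, A=2, C=3, D=3 the first codes per length are 0, 10 and 110, giving
 * B=0, A=10, C=110, D=111; decode() reconstructs candidate codes MSB-first from the
 * bit-reversed input word and compares them against first/count for each length in turn. */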
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t* offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= max_bits; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= max_bits; len++) {
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0) return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < max_bits; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
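/* Example (same lengths as the decode() note above, symbols ordered A,B,C,D with lengths
 * 2,1,3,3): construct() yields counts[1..3] = {1,1,2}, offsets offs[1..3] = {0,1,2} and the
 * sorted symbol table {B, A, C, D}; the "left" counter reaches 0, i.e. the code is complete. */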
/// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s* s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t* lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > max_l_codes || ndist > max_d_codes) {
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist) {
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0) return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0) return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
} else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist) return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0) return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
 * of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
int16_t* lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < fix_l_codes; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, fix_l_codes);
// distance table
for (symbol = 0; symbol < max_d_codes; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, max_d_codes);
return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
* pairs terminated by and end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
* followed a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// Size bases and extra bits for length codes 257..285 and distance codes 0..29
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27,
31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129,
193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
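/* Example of how these tables are applied (illustrative): length symbol 266 has base
 * g_lens[266 - 257] = 13 with g_lext[9] = 1 extra bit, covering lengths 13..14; distance
 * symbol 10 has base g_dists[10] = 33 with g_dext[10] = 4 extra bits, covering 33..48. */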
/// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s* s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
auto cur = s->cur;
auto end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
(s->x.batch_len[batch] != 0)) {}
#else
while (s->x.batch_len[batch] != 0) {}
#endif
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos >> 3) >= end) {
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do {
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
} else {
// Slow length path
uint32_t next32r = __brev(next32);
const int16_t* symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = log2_len_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256) {
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256) {
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
if (dist > 0) {
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
} else {
uint32_t next32r = __brev(next32);
const int16_t* symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = log2_dist_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
} else {
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input
// buffer
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end) {
bitbuf.y = *(const uint32_t*)cur;
cur -= 4;
} else {
bitbuf.y = 0;
cur -= 4;
if (cur > end) {
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256) break;
b[batch_len++] = sym;
} while (batch_len < batch_size - 1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s*)s)->cur = cur;
#endif
if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0) {}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
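/// Illustrative sketch (hypothetical helper, not used by the decoder): each
/// entry pushed into the symbol queue by decode_symbols() is either a plain
/// literal (< 256) or a packed match, with the low 16 bits holding
/// 256 + match length and the high 16 bits holding the distance, which is how
/// process_symbols() below unpacks it.
inline __device__ void unpack_queue_symbol(uint32_t sym, int& literal, int& length, int& distance)
{
  if (sym < 256) {
    literal  = (int)sym;  // literal byte
    length   = 0;
    distance = 0;
  } else {
    literal  = -1;
    length   = (int)(sym & 0xffff) - 256;  // match length (3..258)
    distance = (int)(sym >> 16);           // match distance (1..32768)
  }
}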
/**
* @brief Build lookup tables for faster decode
 * LUT format is symbols*32+length
*/
__device__ void init_length_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
const int16_t* cnt = s->lencnt;
const int16_t* symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_len_lut);
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int code = (rbits >> (log2_len_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
sym = symbols[code];
if (sym > 256) {
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
const int16_t* cnt = s->lencnt;
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
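/// Illustrative sketch (hypothetical helper, not used above): a length-LUT
/// entry packs its value as (symbol << 5) | code_length, which is why the
/// fast literal path in decode_symbols() masks with 0x1f and shifts by 5.
/// For length symbols (> 256) the upper bits additionally carry the base
/// length, the extra-bit mask and the extra-bit position.
inline __device__ void unpack_literal_lut_entry(int32_t entry, int& literal, int& nbits)
{
  nbits   = entry & 0x1f;  // low 5 bits: number of bits consumed from the stream
  literal = entry >> 5;    // literal byte when the entry is below (0x100 << 5)
}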
/**
* @brief Build lookup tables for faster decode of distance symbol
 * LUT format is symbols*32+length
*/
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
const int16_t* cnt = s->distcnt;
const int16_t* symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut);
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int code = (rbits >> (log2_dist_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
const int16_t* cnt = s->distcnt;
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
/// @brief WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s* s, int t)
{
uint8_t* out = s->out;
const uint8_t* outend = s->outend;
const uint8_t* outbase = s->outbase;
int batch = 0;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
int batch_len = 0;
if (t == 0) {
while ((batch_len = s->x.batch_len[batch]) == 0) {}
}
batch_len = shuffle(batch_len);
if (batch_len < 0) { break; }
auto const symt = (t < batch_len) ? b[t] : 256;
auto const lit_mask = ballot(symt >= 256);
auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0) { s->x.batch_len[batch] = 0; }
if (t < pos && out + t < outend) { out[t] = symt; }
out += pos;
batch_len -= pos;
while (batch_len > 0) {
int dist, len, symbol;
// Process a non-literal symbol
symbol = shuffle(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32) {
const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend) { out[i] = b; }
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1)) {
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = shuffle(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend) { out[t] = symbol; }
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (batch_count - 1);
} while (true);
if (t == 0) { s->out = out; }
}
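/// Illustrative sketch (hypothetical reference, not used above): the index
/// expression ((i >= dist) ? (i % dist) : i) - dist gives overlapped matches
/// the same result as the sequential byte-by-byte copy below, e.g. dist = 1,
/// len = 4 repeats the previous output byte four times.
inline void lz77_copy_reference(uint8_t* out, int dist, int len)
{
  for (int i = 0; i < len; ++i) {
    out[i] = out[i - dist];  // may re-read bytes written earlier in this match
  }
}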
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
 * discarded. The values of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
__device__ int init_stored(inflate_state_s* s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); }
if (s->cur + (s->bitpos >> 3) >= s->end) {
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff)) {
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end) {
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
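/// Illustrative sketch (hypothetical helper, not used above): the byte-aligned
/// stored-block header validated by init_stored() is LEN (little-endian)
/// followed by its one's complement NLEN; e.g. a 5-byte stored block starts
/// with the header bytes 0x05 0x00 0xfa 0xff.
inline void make_stored_header(uint16_t len, uint8_t hdr[4])
{
  hdr[0] = len & 0xff;
  hdr[1] = len >> 8;
  hdr[2] = (len ^ 0xffff) & 0xff;  // NLEN low byte
  hdr[3] = (len ^ 0xffff) >> 8;    // NLEN high byte
}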
/// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s* s, int t)
{
auto len = s->stored_blk_len;
auto cur = s->cur + s->bitpos / 8;
auto out = s->out;
auto outend = s->outend;
auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16));
// Slow copy until output is 16B aligned
if (slow_bytes) {
for (int i = t; i < slow_bytes; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
auto fast_bytes = len;
if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
fast_bytes &= ~0xf;
auto bitpos = ((int)((size_t)cur % 4)) * 8;
auto cur4 = cur - (bitpos / 8);
if (out < outend) {
// Fast copy 16 bytes at a time
for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
uint4 u;
u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4);
u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4);
u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4);
u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4);
if (bitpos != 0) {
uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*reinterpret_cast<uint4*>(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0) {
// Reset bitstream to end of block
auto p = cur + len;
auto prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
if (t == 0) {
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
const uint8_t* cur_p = s->pref.cur_p;
const uint8_t* end = s->end;
while (shuffle((t == 0) ? s->pref.run : 0)) {
auto cur_lo = (int32_t)(size_t)cur_p;
int do_pref =
shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
if (do_pref) {
const uint8_t* p = cur_p + 4 * t;
*prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<const uint32_t*>(p) : 0;
cur_p += 4 * 32;
__threadfence_block();
__syncwarp();
if (!t) {
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
}
}
#endif // ENABLE_PREFETCH
/**
* @brief Parse GZIP header
* See https://tools.ietf.org/html/rfc1952
*/
__device__ int parse_gzip_header(const uint8_t* src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18) {
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint8_t flags = src[3];
hdr_len = 10;
if (flags & GZIPHeaderFlag::fextra) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
hdr_len += xlen;
if (hdr_len >= src_size) return -1;
}
if (flags & GZIPHeaderFlag::fname) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fcomment) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len + 8 >= src_size) hdr_len = -1;
}
}
return hdr_len;
}
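/// Illustrative example (not part of the original source): the smallest header
/// accepted by parse_gzip_header() is the fixed 10-byte part with no optional
/// fields (FLG = 0), for which the function returns 10 as long as src_size
/// also covers the mandatory 8-byte CRC32/ISIZE footer.
[[maybe_unused]] static const uint8_t g_example_gzip_header[10] = {
  0x1f, 0x8b,              // magic
  0x08,                    // CM = deflate
  0x00,                    // FLG: no fextra/fname/fcomment/fhcrc
  0x00, 0x00, 0x00, 0x00,  // MTIME
  0x00,                    // XFL
  0xff                     // OS = unknown
};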
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
 * @param inputs Source buffer information per block
* @param outputs Destination buffer information per block
* @param statuses Decompression status buffer per block
 * @param parse_hdr If gzip_header_included::YES, indicates that the compressed bitstream includes a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
inflate_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
gzip_header_included parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s* state = &state_g;
if (!t) {
auto p = inputs[z].data();
auto src_size = inputs[z].size();
// Parse header if needed
state->err = 0;
if (parse_hdr == gzip_header_included::YES) {
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = outputs[z].data();
state->outbase = state->out;
state->outend = state->out + outputs[z].size();
state->end = p + src_size;
auto const prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
// Main loop decoding blocks
while (!state->err) {
if (!t) {
// Thread0: read last flag, block type and custom huffman tables if any
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else {
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2)) {
// Initializes lookup tables (block wide)
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
// Initialize prefetcher
init_prefetcher(state, t);
#endif
if (t < batch_count) { state->x.batch_len[t] = 0; }
__syncthreads();
// decode data until end-of-block code
if (t < 1 * 32) {
// WARP0: decode variable-length symbols
if (!t) {
// Thread0: decode symbols (single threaded)
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
} else if (t < 2 * 32) {
// WARP1: perform LZ77 using length and distance codes from WARP0
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3 * 32) {
// WARP2: Prefetcher: prefetch data for WARP0
prefetch_warp(state, t & 0x1f);
}
#endif
// else WARP3: idle
} else if (!state->err && state->btype == 0) {
// Uncompressed block (block-wide memcpy)
copy_stored(state, t);
}
if (state->blast) break;
__syncthreads();
}
__syncthreads();
// Output decompression status and length
if (!t) {
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
// Read past the end of the input buffer
state->err = 2;
} else if (state->err == 0 && state->out > state->outend) {
// Output buffer too small
state->err = 1;
}
statuses[z].bytes_written = state->out - state->outbase;
statuses[z].status = state->err;
statuses[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes
}
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
 * @param inputs Source buffer information per block
 * @param outputs Destination buffer information per block
*/
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs)
{
__shared__ const uint8_t* volatile src_g;
__shared__ uint8_t* volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
const uint8_t* src;
uint8_t* dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = inputs[z].data();
dst = outputs[z].data();
len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size()));
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) { dst[t] = src[t]; }
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
while (len >= 32) {
const auto* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
reinterpret_cast<uint32_t*>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) { dst[t] = src[t]; }
}
void gpuinflate(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
gzip_header_included parse_hdr,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 128; // Threads per block
if (inputs.size() > 0) {
hipLaunchKernelGGL(( inflate_kernel<block_size>)
, dim3(inputs.size()), dim3(block_size), 0, stream.value(), inputs, outputs, statuses, parse_hdr);
}
}
void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
rmm::cuda_stream_view stream)
{
if (inputs.size() > 0) {
hipLaunchKernelGGL(( copy_uncompressed_kernel), dim3(inputs.size()), dim3(1024), 0, stream.value(), inputs, outputs);
}
}
} // namespace io
} // namespace cudf
| 8a041ec6a0e7a15bbf108776a29f4fa3e9b15102.cu | /*
* Copyright (c) 2018-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.hpp"
#include "io_uncomp.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits = 15; // maximum bits in a code
constexpr int max_l_codes = 286; // maximum number of literal/length codes
constexpr int max_d_codes = 30; // maximum number of distance codes
constexpr int fix_l_codes = 288; // number of fixed literal/length codes
constexpr int log2_len_lut = 10;
constexpr int log2_dist_lut = 8;
/**
* @brief Intermediate arrays for building huffman tables
*/
struct scratch_arr {
int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths
int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch)
};
/**
* @brief Huffman LUTs for length and distance codes
*/
struct lut_arr {
int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding
int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2; // 1..5
constexpr int log2_batch_size = 5;
constexpr int batch_count = (1 << log2_batch_count);
constexpr int batch_size = (1 << log2_batch_size);
/**
* @brief Inter-warp communication queue
*/
struct xwarp_s {
int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[batch_count * batch_size];
uint8_t symqueue8[batch_count * batch_size * 4];
} u;
};
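// Illustrative note (protocol summary, not part of the original source): each
// batch_len slot is a single-producer/single-consumer handshake between WARP0
// and WARP1: 0 means the slot is free, a positive value is the number of
// symbols queued in symqueue, and a negative value signals end-of-block.
// In pseudo-form:
//   producer: while (batch_len[b] != 0) {}  fill symqueue[b];
//             batch_len[b] = count;         b = (b + 1) % batch_count;
//   consumer: while ((n = batch_len[b]) == 0) {}  if (n < 0) stop;
//             drain symqueue[b];            batch_len[b] = 0;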
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3
constexpr int prefetch_size = (1 << log2_prefetch_size);
/// @brief Prefetcher state
struct prefetch_queue_s {
const uint8_t* cur_p; ///< Prefetch location
int run; ///< prefetcher will exit when run=0
uint8_t pref_data[prefetch_size];
};
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
* @brief Inflate decompressor state
*/
struct inflate_state_s {
// output state
uint8_t* out; ///< output buffer
uint8_t* outbase; ///< start of output buffer
uint8_t* outend; ///< end of output buffer
// Input state
uint8_t const* cur; ///< input buffer
uint8_t const* end; ///< end of input buffer
uint2 bitbuf; ///< bit buffer (64-bit)
uint32_t bitpos; ///< position in bit buffer
int32_t err; ///< Error status
int btype; ///< current block type
int blast; ///< last block
uint32_t stored_blk_len; ///< length of stored (uncompressed) block
uint16_t first_slow_len; ///< first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[max_bits + 1];
int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes
int16_t distcnt[max_bits + 1];
int16_t distsym[max_d_codes];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source,
unsigned int bit_start,
unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
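/// Illustrative sketch (hypothetical reference, not used above): for
/// num_bits < 32, bfe() is equivalent to the portable expression below,
/// e.g. bfe(0xABCD, 4, 8) == 0xBC.
inline __device__ unsigned int bfe_reference(unsigned int source,
                                             unsigned int bit_start,
                                             unsigned int num_bits)
{
  return (source >> bit_start) & ((1u << num_bits) - 1u);
}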
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32) {
auto cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, const int16_t* counts, const int16_t* symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= max_bits; len++) {
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
inflate_state_s* s, int16_t* counts, int16_t* symbols, const int16_t* length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t* offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= max_bits; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= max_bits; len++) {
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0) return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < max_bits; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
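// Worked example (not part of the original source): for code lengths
// {A:2, B:1, C:3, D:3}, construct() yields
//   counts  = {len 1: 1, len 2: 1, len 3: 2}  (complete set, return value 0)
//   symbols = {B, A, C, D}                    (sorted by length, then symbol)
// which corresponds to the canonical codes B -> 0, A -> 10, C -> 110,
// D -> 111; decode() above resolves e.g. the bit sequence 1,1,0 to C by
// testing one additional bit per iteration against counts[len].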
/// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s* s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t* lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > max_l_codes || ndist > max_d_codes) {
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist) {
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0) return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0) return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
} else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist) return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0) return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
 * of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
int16_t* lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < fix_l_codes; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, fix_l_codes);
// distance table
for (symbol = 0; symbol < max_d_codes; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, max_d_codes);
return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
 * pairs terminated by an end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
* followed a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// Size bases and extra bits for length codes 257..285 and distance codes 0..29
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27,
31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129,
193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
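// Worked example (not part of the original source): a decoded length is
// g_lens[sym - 257] plus g_lext[sym - 257] extra bits from the stream, and
// likewise for distances; e.g. length symbol 269 has base 19 with 2 extra
// bits (lengths 19..22), and distance symbol 10 has base 33 with 4 extra
// bits (distances 33..48).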
/// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s* s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
auto cur = s->cur;
auto end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
(s->x.batch_len[batch] != 0)) {}
#else
while (s->x.batch_len[batch] != 0) {}
#endif
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos >> 3) >= end) {
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do {
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
} else {
// Slow length path
uint32_t next32r = __brev(next32);
const int16_t* symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = log2_len_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256) {
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256) {
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(const uint32_t*)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
if (dist > 0) {
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
} else {
uint32_t next32r = __brev(next32);
const int16_t* symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = log2_dist_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
} else {
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input
// buffer
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end) {
bitbuf.y = *(const uint32_t*)cur;
cur -= 4;
} else {
bitbuf.y = 0;
cur -= 4;
if (cur > end) {
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256) break;
b[batch_len++] = sym;
} while (batch_len < batch_size - 1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s*)s)->cur = cur;
#endif
if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0) {}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
/**
* @brief Build lookup tables for faster decode
 * LUT format is symbols*32+length
*/
__device__ void init_length_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
const int16_t* cnt = s->lencnt;
const int16_t* symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_len_lut);
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int code = (rbits >> (log2_len_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
sym = symbols[code];
if (sym > 256) {
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
const int16_t* cnt = s->lencnt;
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
/**
* @brief Build lookup tables for faster decode of distance symbol
 * LUT format is symbols*32+length
*/
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
const int16_t* cnt = s->distcnt;
const int16_t* symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut);
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int code = (rbits >> (log2_dist_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
const int16_t* cnt = s->distcnt;
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
/// @brief WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s* s, int t)
{
uint8_t* out = s->out;
const uint8_t* outend = s->outend;
const uint8_t* outbase = s->outbase;
int batch = 0;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
int batch_len = 0;
if (t == 0) {
while ((batch_len = s->x.batch_len[batch]) == 0) {}
}
batch_len = shuffle(batch_len);
if (batch_len < 0) { break; }
auto const symt = (t < batch_len) ? b[t] : 256;
auto const lit_mask = ballot(symt >= 256);
auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0) { s->x.batch_len[batch] = 0; }
if (t < pos && out + t < outend) { out[t] = symt; }
out += pos;
batch_len -= pos;
while (batch_len > 0) {
int dist, len, symbol;
// Process a non-literal symbol
symbol = shuffle(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32) {
const uint8_t* src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend) { out[i] = b; }
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1)) {
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = shuffle(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend) { out[t] = symbol; }
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (batch_count - 1);
} while (true);
if (t == 0) { s->out = out; }
}
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
 * discarded. The values of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
__device__ int init_stored(inflate_state_s* s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); }
if (s->cur + (s->bitpos >> 3) >= s->end) {
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff)) {
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end) {
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
/// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s* s, int t)
{
auto len = s->stored_blk_len;
auto cur = s->cur + s->bitpos / 8;
auto out = s->out;
auto outend = s->outend;
auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16));
// Slow copy until output is 16B aligned
if (slow_bytes) {
for (int i = t; i < slow_bytes; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
auto fast_bytes = len;
if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
fast_bytes &= ~0xf;
auto bitpos = ((int)((size_t)cur % 4)) * 8;
auto cur4 = cur - (bitpos / 8);
if (out < outend) {
// Fast copy 16 bytes at a time
for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
uint4 u;
u.x = *reinterpret_cast<const uint32_t*>(cur4 + i + 0 * 4);
u.y = *reinterpret_cast<const uint32_t*>(cur4 + i + 1 * 4);
u.z = *reinterpret_cast<const uint32_t*>(cur4 + i + 2 * 4);
u.w = *reinterpret_cast<const uint32_t*>(cur4 + i + 3 * 4);
if (bitpos != 0) {
uint32_t v = (bitpos != 0) ? *reinterpret_cast<const uint32_t*>(cur4 + i + 4 * 4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*reinterpret_cast<uint4*>(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0) {
// Reset bitstream to end of block
auto p = cur + len;
auto prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
if (t == 0) {
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
const uint8_t* cur_p = s->pref.cur_p;
const uint8_t* end = s->end;
while (shuffle((t == 0) ? s->pref.run : 0)) {
auto cur_lo = (int32_t)(size_t)cur_p;
int do_pref =
shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
if (do_pref) {
const uint8_t* p = cur_p + 4 * t;
*prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<const uint32_t*>(p) : 0;
cur_p += 4 * 32;
__threadfence_block();
__syncwarp();
if (!t) {
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
}
}
#endif // ENABLE_PREFETCH
/**
* @brief Parse GZIP header
* See https://tools.ietf.org/html/rfc1952
*/
__device__ int parse_gzip_header(const uint8_t* src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18) {
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint8_t flags = src[3];
hdr_len = 10;
if (flags & GZIPHeaderFlag::fextra) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
hdr_len += xlen;
if (hdr_len >= src_size) return -1;
}
if (flags & GZIPHeaderFlag::fname) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fcomment) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len + 8 >= src_size) hdr_len = -1;
}
}
return hdr_len;
}
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
 * @param inputs Source buffer information per block
* @param outputs Destination buffer information per block
* @param statuses Decompression status buffer per block
 * @param parse_hdr If gzip_header_included::YES, indicates that the compressed bitstream includes a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
inflate_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
gzip_header_included parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s* state = &state_g;
if (!t) {
auto p = inputs[z].data();
auto src_size = inputs[z].size();
// Parse header if needed
state->err = 0;
if (parse_hdr == gzip_header_included::YES) {
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = outputs[z].data();
state->outbase = state->out;
state->outend = state->out + outputs[z].size();
state->end = p + src_size;
auto const prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
// Main loop decoding blocks
while (!state->err) {
if (!t) {
// Thread0: read last flag, block type and custom huffman tables if any
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else {
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2)) {
// Initializes lookup tables (block wide)
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
// Initialize prefetcher
init_prefetcher(state, t);
#endif
if (t < batch_count) { state->x.batch_len[t] = 0; }
__syncthreads();
// decode data until end-of-block code
if (t < 1 * 32) {
// WARP0: decode variable-length symbols
if (!t) {
// Thread0: decode symbols (single threaded)
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
} else if (t < 2 * 32) {
// WARP1: perform LZ77 using length and distance codes from WARP0
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3 * 32) {
// WARP2: Prefetcher: prefetch data for WARP0
prefetch_warp(state, t & 0x1f);
}
#endif
// else WARP3: idle
} else if (!state->err && state->btype == 0) {
// Uncompressed block (block-wide memcpy)
copy_stored(state, t);
}
if (state->blast) break;
__syncthreads();
}
__syncthreads();
// Output decompression status and length
if (!t) {
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
// Read past the end of the input buffer
state->err = 2;
} else if (state->err == 0 && state->out > state->outend) {
// Output buffer too small
state->err = 1;
}
statuses[z].bytes_written = state->out - state->outbase;
statuses[z].status = state->err;
statuses[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes
}
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
 * @param inputs Source buffer information per block
 * @param outputs Destination buffer information per block
*/
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs)
{
__shared__ const uint8_t* volatile src_g;
__shared__ uint8_t* volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
const uint8_t* src;
uint8_t* dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = inputs[z].data();
dst = outputs[z].data();
len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size()));
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) { dst[t] = src[t]; }
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
while (len >= 32) {
const auto* src32 = reinterpret_cast<const uint32_t*>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
reinterpret_cast<uint32_t*>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) { dst[t] = src[t]; }
}
void gpuinflate(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<decompress_status> statuses,
gzip_header_included parse_hdr,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 128; // Threads per block
if (inputs.size() > 0) {
inflate_kernel<block_size>
<<<inputs.size(), block_size, 0, stream.value()>>>(inputs, outputs, statuses, parse_hdr);
}
}
void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
rmm::cuda_stream_view stream)
{
if (inputs.size() > 0) {
copy_uncompressed_kernel<<<inputs.size(), 1024, 0, stream.value()>>>(inputs, outputs);
}
}
} // namespace io
} // namespace cudf
|
b48d1dc678681dc863091ee0e6428943a3e2f52e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on embLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "layer_norm.cuh"
#include "embed_layer_norm_impl.h"
#include <hip/hip_fp16.h>
using namespace onnxruntime::cuda;
using namespace hipcub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <unsigned TPB>
__global__ void MaskIndexKernelSmall(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = hipcub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
hipcub::Min min;
int thread_data(sequence_length);
const int idx = offset + threadIdx.x;
if (threadIdx.x < sequence_length) {
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = threadIdx.x;
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
template <unsigned TPB>
__global__ void MaskIndexKernel(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = hipcub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
hipcub::Min min;
int thread_data(sequence_length);
for (int i = threadIdx.x; i < sequence_length; i += TPB) {
const int idx = offset + i;
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = min(thread_data, i);
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
inline bool ComputeMaskIndex(hipStream_t stream, const int sequence_length, const int batch_size, const int* mask, int* mask_index) {
// Mask idx is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
// Assume n = batch_size x sequence_length
if (sequence_length <= 32) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<32>), dim3(batch_size), dim3(32), 0, stream, sequence_length, mask, mask_index);
} else if (sequence_length <= 128) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<128>), dim3(batch_size), dim3(128), 0, stream, sequence_length, mask, mask_index);
} else if (sequence_length == 384) {
hipLaunchKernelGGL(( MaskIndexKernelSmall<384>), dim3(batch_size), dim3(384), 0, stream, sequence_length, mask, mask_index);
} else {
hipLaunchKernelGGL(( MaskIndexKernel<256>), dim3(batch_size), dim3(256), 0, stream, sequence_length, mask, mask_index);
}
return CUDA_CALL(hipPeekAtLastError());
}
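// Illustrative sketch (hypothetical reference, not used above): the kernels
// reduce each row of the attention mask to the index of its first zero, or to
// sequence_length when the row is all ones, assuming the valid region is a
// contiguous prefix. A sequential equivalent:
inline int MaskIndexReference(const int* mask_row, int sequence_length) {
  for (int i = 0; i < sequence_length; ++i) {
    if (mask_row[i] == 0) return i;  // first masked position
  }
  return sequence_length;  // no masked positions
}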
template <typename T, unsigned TPB>
__global__ void EmbedLayerNormKernel(
int hidden_size, const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output) {
KeyValuePairSum pair_sum;
// 1. lookup word and segment of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = sequence_length
// gridDim.y = batch_size
__shared__ int word_id;
__shared__ int segment_id;
const T rld = T(1.f / hidden_size);
const int sequence_position = blockIdx.y * gridDim.x + blockIdx.x;
if (threadIdx.x == 0) {
word_id = input_ids[sequence_position];
if (nullptr == segment_ids) {
segment_id = 0;
} else {
segment_id = segment_ids[sequence_position];
}
}
__syncthreads();
// 2. load pos/segment/word embeddings and add them together
// offset into embeddings is given by word_id * hidden_size
const int position_offset = blockIdx.x * hidden_size;
const int word_offset = word_id * hidden_size;
const int segment_offset = segment_id * hidden_size;
// the output offset is given by b * (sequence_length * hidden_size) + s * hidden_size
const int output_offset = sequence_position * hidden_size;
hipcub::KeyValuePair<T, T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden_size; it += TPB) {
const T w(word_embedding[word_offset + it]);
T t(0);
if (nullptr != segment_embedding)
t = segment_embedding[segment_offset + it];
const T p(position_embedding[position_offset + it]);
const T val = w + t + p;
output[output_offset + it] = val;
const T rldval = rld * val;
thread_data = pair_sum(thread_data, hipcub::KeyValuePair<T, T>(rldval, rldval * val));
}
// 3. layer norm on the sum
LayerNorm<T, TPB>(thread_data, hidden_size, output_offset, beta, gamma, epsilon, output);
}
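// Note on the reduction above (a sketch; LayerNorm<T, TPB> itself lives in layer_norm.cuh):
// each thread accumulates the pair (val / hidden_size, val * val / hidden_size), so summed
// across the block this yields the mean and the mean of squares of the embedding row, from
// which variance = E[x^2] - E[x]^2. E.g. for a hypothetical hidden_size = 4 row {1, 2, 3, 4}
// the summed pair is (2.5, 7.5) and the variance is 7.5 - 2.5^2 = 1.25.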
template <typename T>
bool EmbedSkipLayerNorm(
hipStream_t stream, int hidden_size, int batch_size, int sequence_length,
const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output) {
constexpr int tpb = 256;
const dim3 grid(sequence_length, batch_size, 1);
const dim3 block(tpb, 1, 1);
hipLaunchKernelGGL(( EmbedLayerNormKernel<T, tpb>)
, dim3(grid), dim3(block), 0, stream, hidden_size, input_ids, segment_ids, beta, gamma, word_embedding, position_embedding, segment_embedding, epsilon, output);
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchEmbedLayerNormKernel(
void* output,
void* mask_index,
const int* input_ids,
const int* segment_ids,
const int* input_mask,
const void* gamma,
const void* beta,
const void* word_embedding,
const void* position_embedding,
const void* segment_embedding,
float epsilon,
const int hidden_size,
int batch_size,
int sequence_length,
const size_t element_size) {
const hipStream_t stream = nullptr; // default stream
if (nullptr == input_mask) {
if (!CUDA_CALL(hipMemsetAsync(mask_index, 0, sizeof(int) * batch_size)))
return false;
} else if (!ComputeMaskIndex(stream, sequence_length, batch_size, input_mask, static_cast<int*>(mask_index))) {
return false;
}
if (element_size == 2) {
return EmbedSkipLayerNorm<half>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(word_embedding), reinterpret_cast<const half*>(position_embedding),
reinterpret_cast<const half*>(segment_embedding), __float2half_rn(epsilon),
reinterpret_cast<half*>(output));
} else {
return EmbedSkipLayerNorm<float>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma),
reinterpret_cast<const float*>(word_embedding), reinterpret_cast<const float*>(position_embedding),
reinterpret_cast<const float*>(segment_embedding), epsilon,
reinterpret_cast<float*>(output));
}
}
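// Usage sketch (hypothetical device buffers; the epsilon value is illustrative only):
//   bool ok = LaunchEmbedLayerNormKernel(
//       d_output, d_mask_index, d_input_ids, d_segment_ids, d_input_mask,
//       d_gamma, d_beta, d_word_emb, d_pos_emb, d_segment_emb,
//       1e-12f, hidden_size, batch_size, sequence_length, sizeof(float));
// Passing nullptr for input_mask zeroes mask_index instead of running ComputeMaskIndex,
// and element_size == 2 selects the half-precision specialization.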
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| b48d1dc678681dc863091ee0e6428943a3e2f52e.cu | /*
The implementation of this file is based on embLayerNorm plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "layer_norm.cuh"
#include "embed_layer_norm_impl.h"
#include <cuda_fp16.h>
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <unsigned TPB>
__global__ void MaskIndexKernelSmall(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = cub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
cub::Min min;
int thread_data(sequence_length);
const int idx = offset + threadIdx.x;
if (threadIdx.x < sequence_length) {
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = threadIdx.x;
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
template <unsigned TPB>
__global__ void MaskIndexKernel(int sequence_length, const int* mask, int* mask_index) {
using BlockReduce = cub::BlockReduce<int, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
// blockIdx.x is b
const int offset = blockIdx.x * sequence_length; // batch strides of sequence_length
cub::Min min;
int thread_data(sequence_length);
for (int i = threadIdx.x; i < sequence_length; i += TPB) {
const int idx = offset + i;
const int val = mask[idx];
if (val == 0) // masked position: report thread idx
{
thread_data = min(thread_data, i);
}
}
const auto min_index = BlockReduce(temp_storage).Reduce(thread_data, min);
if (threadIdx.x == 0) {
mask_index[blockIdx.x] = min_index;
}
}
inline bool ComputeMaskIndex(cudaStream_t stream, const int sequence_length, const int batch_size, const int* mask, int* mask_index) {
// Mask idx is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
// Assume n = batch_size x sequence_length
if (sequence_length <= 32) {
MaskIndexKernelSmall<32><<<batch_size, 32, 0, stream>>>(sequence_length, mask, mask_index);
} else if (sequence_length <= 128) {
MaskIndexKernelSmall<128><<<batch_size, 128, 0, stream>>>(sequence_length, mask, mask_index);
} else if (sequence_length == 384) {
MaskIndexKernelSmall<384><<<batch_size, 384, 0, stream>>>(sequence_length, mask, mask_index);
} else {
MaskIndexKernel<256><<<batch_size, 256, 0, stream>>>(sequence_length, mask, mask_index);
}
return CUDA_CALL(cudaPeekAtLastError());
}
template <typename T, unsigned TPB>
__global__ void EmbedLayerNormKernel(
int hidden_size, const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output) {
KeyValuePairSum pair_sum;
// 1. lookup word and segment of the block
// blockIdx.x = position in the sequence
// blockIdx.y = batch
// gridDim.x = sequence_length
// gridDim.y = batch_size
__shared__ int word_id;
__shared__ int segment_id;
const T rld = T(1.f / hidden_size);
const int sequence_position = blockIdx.y * gridDim.x + blockIdx.x;
if (threadIdx.x == 0) {
word_id = input_ids[sequence_position];
if (nullptr == segment_ids) {
segment_id = 0;
} else {
segment_id = segment_ids[sequence_position];
}
}
__syncthreads();
// 2. load pos/segment/word embeddings and add them together
// offset into embeddings is given by word_id * hidden_size
const int position_offset = blockIdx.x * hidden_size;
const int word_offset = word_id * hidden_size;
const int segment_offset = segment_id * hidden_size;
// the output offset is given by b * (sequence_length * hidden_size) + s * hidden_size
const int output_offset = sequence_position * hidden_size;
cub::KeyValuePair<T, T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden_size; it += TPB) {
const T w(word_embedding[word_offset + it]);
T t(0);
if (nullptr != segment_embedding)
t = segment_embedding[segment_offset + it];
const T p(position_embedding[position_offset + it]);
const T val = w + t + p;
output[output_offset + it] = val;
const T rldval = rld * val;
thread_data = pair_sum(thread_data, cub::KeyValuePair<T, T>(rldval, rldval * val));
}
// 3. layer norm on the sum
LayerNorm<T, TPB>(thread_data, hidden_size, output_offset, beta, gamma, epsilon, output);
}
template <typename T>
bool EmbedSkipLayerNorm(
cudaStream_t stream, int hidden_size, int batch_size, int sequence_length,
const int* input_ids, const int* segment_ids, const T* beta, const T* gamma,
const T* word_embedding, const T* position_embedding, const T* segment_embedding,
const T epsilon, T* output) {
constexpr int tpb = 256;
const dim3 grid(sequence_length, batch_size, 1);
const dim3 block(tpb, 1, 1);
EmbedLayerNormKernel<T, tpb>
<<<grid, block, 0, stream>>>(hidden_size, input_ids, segment_ids, beta, gamma, word_embedding, position_embedding, segment_embedding, epsilon, output);
return CUDA_CALL(cudaPeekAtLastError());
}
bool LaunchEmbedLayerNormKernel(
void* output,
void* mask_index,
const int* input_ids,
const int* segment_ids,
const int* input_mask,
const void* gamma,
const void* beta,
const void* word_embedding,
const void* position_embedding,
const void* segment_embedding,
float epsilon,
const int hidden_size,
int batch_size,
int sequence_length,
const size_t element_size) {
const cudaStream_t stream = nullptr; // default stream
if (nullptr == input_mask) {
if (!CUDA_CALL(cudaMemsetAsync(mask_index, 0, sizeof(int) * batch_size)))
return false;
} else if (!ComputeMaskIndex(stream, sequence_length, batch_size, input_mask, static_cast<int*>(mask_index))) {
return false;
}
if (element_size == 2) {
return EmbedSkipLayerNorm<half>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const half*>(beta), reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(word_embedding), reinterpret_cast<const half*>(position_embedding),
reinterpret_cast<const half*>(segment_embedding), __float2half_rn(epsilon),
reinterpret_cast<half*>(output));
} else {
return EmbedSkipLayerNorm<float>(
stream, hidden_size, batch_size, sequence_length, input_ids, segment_ids,
reinterpret_cast<const float*>(beta), reinterpret_cast<const float*>(gamma),
reinterpret_cast<const float*>(word_embedding), reinterpret_cast<const float*>(position_embedding),
reinterpret_cast<const float*>(segment_embedding), epsilon,
reinterpret_cast<float*>(output));
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
7e168892ded1116912495d0c959d95188b2f88ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_right;
int xdim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_right;
int ydim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_right;
int xdim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_right;
int ydim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_right*(y)+xdim0_update_halo_kernel2_zvel_plus_2_right*ydim0_update_halo_kernel2_zvel_plus_2_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_right*(y)+xdim1_update_halo_kernel2_zvel_plus_2_right*ydim1_update_halo_kernel2_zvel_plus_2_right*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_right_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(-2,0,0)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(-2,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
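// Indexing note: OPS_ACC0(x,y,z) above flattens a relative 3D offset as
// x + xdim0*(y) + xdim0*ydim0*(z), using the per-dat
// xdim0_/ydim0_update_halo_kernel2_zvel_plus_2_right constants, and the wrapper kernel
// below has already advanced arg0/arg1 to the current grid point. So
// zvel0[OPS_ACC0(-2,0,0)] reads the value two cells to the left in x; with a hypothetical
// xdim0 = 100 and ydim0 = 100, OPS_ACC0(-2,0,1) would be the linear offset
// -2 + 0 + 100*100*1 = 9998.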
__global__ void ops_update_halo_kernel2_zvel_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_right + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_right * ydim0_update_halo_kernel2_zvel_plus_2_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_right + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_right * ydim1_update_halo_kernel2_zvel_plus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,55)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(55,"update_halo_kernel2_zvel_plus_2_right");
OPS_kernels[55].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_right_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_right_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_right_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_plus_2_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_plus_2_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_plus_2_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[55].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[55].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[55].mpi_time += t2-t1;
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 55;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 55;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(55,"update_halo_kernel2_zvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 7e168892ded1116912495d0c959d95188b2f88ed.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_right;
int xdim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_right;
int ydim0_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_right;
int xdim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_right;
int ydim1_update_halo_kernel2_zvel_plus_2_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_right*(y)+xdim0_update_halo_kernel2_zvel_plus_2_right*ydim0_update_halo_kernel2_zvel_plus_2_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_right*(y)+xdim1_update_halo_kernel2_zvel_plus_2_right*ydim1_update_halo_kernel2_zvel_plus_2_right*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_right_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(-2,0,0)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(-2,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_right + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_right * ydim0_update_halo_kernel2_zvel_plus_2_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_right + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_right * ydim1_update_halo_kernel2_zvel_plus_2_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,55)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(55,"update_halo_kernel2_zvel_plus_2_right");
OPS_kernels[55].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_right_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_right_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_right_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_plus_2_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_plus_2_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_plus_2_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_plus_2_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[55].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_plus_2_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[55].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[55].mpi_time += t2-t1;
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 55;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 55;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(55,"update_halo_kernel2_zvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
8a543be3620df1a584f6b238e44a162d6513a942.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "decode.h"
#include "stdio.h"
namespace nvinfer1
{
DecodePlugin::DecodePlugin()
{
}
DecodePlugin::~DecodePlugin()
{
}
// create the plugin at runtime from a byte stream
DecodePlugin::DecodePlugin(const void* data, size_t length)
{
}
void DecodePlugin::serialize(void* buffer) const
{
}
size_t DecodePlugin::getSerializationSize() const
{
return 0;
}
int DecodePlugin::initialize()
{
return 0;
}
Dims DecodePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalCount = 0;
totalCount += decodeplugin::INPUT_H / 8 * decodeplugin::INPUT_W / 8 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 16 * decodeplugin::INPUT_W / 16 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 32 * decodeplugin::INPUT_W / 32 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
return Dims3(totalCount + 1, 1, 1);
}
// Set plugin namespace
void DecodePlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* DecodePlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType DecodePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool DecodePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool DecodePlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void DecodePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void DecodePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void DecodePlugin::detachFromContext() {}
const char* DecodePlugin::getPluginType() const
{
return "Decode_TRT";
}
const char* DecodePlugin::getPluginVersion() const
{
return "1";
}
void DecodePlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* DecodePlugin::clone() const
{
DecodePlugin *p = new DecodePlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + expf(-data)); }
__global__ void CalDetection(const float *input, float *output, int num_elem, int step, int anchor, int output_elem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
int h = decodeplugin::INPUT_H / step;
int w = decodeplugin::INPUT_W / step;
int total_grid = h * w;
int bn_idx = idx / total_grid;
idx = idx - bn_idx * total_grid;
int y = idx / w;
int x = idx % w;
const float* cur_input = input + bn_idx * (4 + 2 + 10) * 2 * total_grid;
const float *bbox_reg = &cur_input[0];
const float *cls_reg = &cur_input[2 * 4 * total_grid];
const float *lmk_reg = &cur_input[2 * 4 * total_grid + 2 * 2 * total_grid];
for (int k = 0; k < 2; ++k) {
float conf1 = cls_reg[idx + k * total_grid * 2];
float conf2 = cls_reg[idx + k * total_grid * 2 + total_grid];
conf2 = expf(conf2) / (expf(conf1) + expf(conf2));
if (conf2 <= 0.02) continue;
float *res_count = output + bn_idx * output_elem;
int count = (int)atomicAdd(res_count, 1);
char* data = (char *)res_count + sizeof(float) + count * sizeof(decodeplugin::Detection);
decodeplugin::Detection* det = (decodeplugin::Detection*)(data);
float prior[4];
prior[0] = ((float)x + 0.5) / w;
prior[1] = ((float)y + 0.5) / h;
prior[2] = (float)anchor * (k + 1) / decodeplugin::INPUT_W;
prior[3] = (float)anchor * (k + 1) / decodeplugin::INPUT_H;
//Location
det->bbox[0] = prior[0] + bbox_reg[idx + k * total_grid * 4] * 0.1 * prior[2];
det->bbox[1] = prior[1] + bbox_reg[idx + k * total_grid * 4 + total_grid] * 0.1 * prior[3];
det->bbox[2] = prior[2] * expf(bbox_reg[idx + k * total_grid * 4 + total_grid * 2] * 0.2);
det->bbox[3] = prior[3] * expf(bbox_reg[idx + k * total_grid * 4 + total_grid * 3] * 0.2);
det->bbox[0] -= det->bbox[2] / 2;
det->bbox[1] -= det->bbox[3] / 2;
det->bbox[2] += det->bbox[0];
det->bbox[3] += det->bbox[1];
det->bbox[0] *= decodeplugin::INPUT_W;
det->bbox[1] *= decodeplugin::INPUT_H;
det->bbox[2] *= decodeplugin::INPUT_W;
det->bbox[3] *= decodeplugin::INPUT_H;
det->class_confidence = conf2;
for (int i = 0; i < 10; i += 2) {
det->landmark[i] = prior[0] + lmk_reg[idx + k * total_grid * 10 + total_grid * i] * 0.1 * prior[2];
det->landmark[i+1] = prior[1] + lmk_reg[idx + k * total_grid * 10 + total_grid * (i + 1)] * 0.1 * prior[3];
det->landmark[i] *= decodeplugin::INPUT_W;
det->landmark[i+1] *= decodeplugin::INPUT_H;
}
}
}
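// Decode sketch (standard SSD/RetinaFace box decoding with variances 0.1 / 0.2; the
// concrete numbers below assume hypothetical INPUT_W = INPUT_H = 640 from decode.h):
// at step = 8 the grid is 80x80, and for cell (x, y) = (0, 0), k = 0, anchor = 16 the
// prior is (cx, cy, pw, ph) = (0.00625, 0.00625, 0.025, 0.025). With all regression
// outputs equal to zero the decoded corners are (-4, -4) .. (12, 12) in pixels, i.e. a
// 16x16 box centred on pixel (4, 4); non-zero outputs shift the centre by
// reg * 0.1 * prior size and scale the extent by expf(reg * 0.2).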
void DecodePlugin::forwardGpu(const float *const * inputs, float * output, hipStream_t stream, int batchSize)
{
int num_elem = 0;
int base_step = 8;
int base_anchor = 16;
int thread_count;
int totalCount = 1;
totalCount += decodeplugin::INPUT_H / 8 * decodeplugin::INPUT_W / 8 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 16 * decodeplugin::INPUT_W / 16 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 32 * decodeplugin::INPUT_W / 32 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
hipMemset(output + idx * totalCount, 0, sizeof(float));
}
for (unsigned int i = 0; i < 3; ++i)
{
num_elem = batchSize * decodeplugin::INPUT_H / base_step * decodeplugin::INPUT_W / base_step;
thread_count = (num_elem < thread_count_) ? num_elem : thread_count_;
hipLaunchKernelGGL(( CalDetection), dim3((num_elem + thread_count - 1) / thread_count), dim3(thread_count), 0, 0,
inputs[i], output, num_elem, base_step, base_anchor, totalCount);
base_step *= 2;
base_anchor *= 4;
}
}
int DecodePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize);
return 0;
};
PluginFieldCollection DecodePluginCreator::mFC{};
std::vector<PluginField> DecodePluginCreator::mPluginAttributes;
DecodePluginCreator::DecodePluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* DecodePluginCreator::getPluginName() const
{
return "Decode_TRT";
}
const char* DecodePluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* DecodePluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* DecodePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
DecodePlugin* obj = new DecodePlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* DecodePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call PReluPlugin::destroy()
DecodePlugin* obj = new DecodePlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| 8a543be3620df1a584f6b238e44a162d6513a942.cu | #include "decode.h"
#include "stdio.h"
namespace nvinfer1
{
DecodePlugin::DecodePlugin()
{
}
DecodePlugin::~DecodePlugin()
{
}
// create the plugin at runtime from a byte stream
DecodePlugin::DecodePlugin(const void* data, size_t length)
{
}
void DecodePlugin::serialize(void* buffer) const
{
}
size_t DecodePlugin::getSerializationSize() const
{
return 0;
}
int DecodePlugin::initialize()
{
return 0;
}
Dims DecodePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalCount = 0;
totalCount += decodeplugin::INPUT_H / 8 * decodeplugin::INPUT_W / 8 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 16 * decodeplugin::INPUT_W / 16 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 32 * decodeplugin::INPUT_W / 32 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
return Dims3(totalCount + 1, 1, 1);
}
// Set plugin namespace
void DecodePlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* DecodePlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType DecodePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool DecodePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool DecodePlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void DecodePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void DecodePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void DecodePlugin::detachFromContext() {}
const char* DecodePlugin::getPluginType() const
{
return "Decode_TRT";
}
const char* DecodePlugin::getPluginVersion() const
{
return "1";
}
void DecodePlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* DecodePlugin::clone() const
{
DecodePlugin *p = new DecodePlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + expf(-data)); }
__global__ void CalDetection(const float *input, float *output, int num_elem, int step, int anchor, int output_elem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
int h = decodeplugin::INPUT_H / step;
int w = decodeplugin::INPUT_W / step;
int total_grid = h * w;
int bn_idx = idx / total_grid;
idx = idx - bn_idx * total_grid;
int y = idx / w;
int x = idx % w;
const float* cur_input = input + bn_idx * (4 + 2 + 10) * 2 * total_grid;
const float *bbox_reg = &cur_input[0];
const float *cls_reg = &cur_input[2 * 4 * total_grid];
const float *lmk_reg = &cur_input[2 * 4 * total_grid + 2 * 2 * total_grid];
for (int k = 0; k < 2; ++k) {
float conf1 = cls_reg[idx + k * total_grid * 2];
float conf2 = cls_reg[idx + k * total_grid * 2 + total_grid];
conf2 = expf(conf2) / (expf(conf1) + expf(conf2));
if (conf2 <= 0.02) continue;
float *res_count = output + bn_idx * output_elem;
int count = (int)atomicAdd(res_count, 1);
char* data = (char *)res_count + sizeof(float) + count * sizeof(decodeplugin::Detection);
decodeplugin::Detection* det = (decodeplugin::Detection*)(data);
float prior[4];
prior[0] = ((float)x + 0.5) / w;
prior[1] = ((float)y + 0.5) / h;
prior[2] = (float)anchor * (k + 1) / decodeplugin::INPUT_W;
prior[3] = (float)anchor * (k + 1) / decodeplugin::INPUT_H;
//Location
det->bbox[0] = prior[0] + bbox_reg[idx + k * total_grid * 4] * 0.1 * prior[2];
det->bbox[1] = prior[1] + bbox_reg[idx + k * total_grid * 4 + total_grid] * 0.1 * prior[3];
det->bbox[2] = prior[2] * expf(bbox_reg[idx + k * total_grid * 4 + total_grid * 2] * 0.2);
det->bbox[3] = prior[3] * expf(bbox_reg[idx + k * total_grid * 4 + total_grid * 3] * 0.2);
det->bbox[0] -= det->bbox[2] / 2;
det->bbox[1] -= det->bbox[3] / 2;
det->bbox[2] += det->bbox[0];
det->bbox[3] += det->bbox[1];
det->bbox[0] *= decodeplugin::INPUT_W;
det->bbox[1] *= decodeplugin::INPUT_H;
det->bbox[2] *= decodeplugin::INPUT_W;
det->bbox[3] *= decodeplugin::INPUT_H;
det->class_confidence = conf2;
for (int i = 0; i < 10; i += 2) {
det->landmark[i] = prior[0] + lmk_reg[idx + k * total_grid * 10 + total_grid * i] * 0.1 * prior[2];
det->landmark[i+1] = prior[1] + lmk_reg[idx + k * total_grid * 10 + total_grid * (i + 1)] * 0.1 * prior[3];
det->landmark[i] *= decodeplugin::INPUT_W;
det->landmark[i+1] *= decodeplugin::INPUT_H;
}
}
}
void DecodePlugin::forwardGpu(const float *const * inputs, float * output, cudaStream_t stream, int batchSize)
{
int num_elem = 0;
int base_step = 8;
int base_anchor = 16;
int thread_count;
int totalCount = 1;
totalCount += decodeplugin::INPUT_H / 8 * decodeplugin::INPUT_W / 8 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 16 * decodeplugin::INPUT_W / 16 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
totalCount += decodeplugin::INPUT_H / 32 * decodeplugin::INPUT_W / 32 * 2 * sizeof(decodeplugin::Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
cudaMemset(output + idx * totalCount, 0, sizeof(float));
}
for (unsigned int i = 0; i < 3; ++i)
{
num_elem = batchSize * decodeplugin::INPUT_H / base_step * decodeplugin::INPUT_W / base_step;
thread_count = (num_elem < thread_count_) ? num_elem : thread_count_;
CalDetection<<< (num_elem + thread_count - 1) / thread_count, thread_count>>>
(inputs[i], output, num_elem, base_step, base_anchor, totalCount);
base_step *= 2;
base_anchor *= 4;
}
}
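// Output layout note: each batch item owns a slab of totalCount floats. The first float is
// a running detection counter (zeroed in the loop above, bumped with atomicAdd inside
// CalDetection), and the remaining floats are the packed decodeplugin::Detection records
// written immediately after it. For a hypothetical 640x640 input that is
// 1 + (80*80 + 40*40 + 20*20) * 2 * sizeof(decodeplugin::Detection) / sizeof(float)
// floats per image.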
int DecodePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize);
return 0;
};
PluginFieldCollection DecodePluginCreator::mFC{};
std::vector<PluginField> DecodePluginCreator::mPluginAttributes;
DecodePluginCreator::DecodePluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* DecodePluginCreator::getPluginName() const
{
return "Decode_TRT";
}
const char* DecodePluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* DecodePluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* DecodePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
DecodePlugin* obj = new DecodePlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* DecodePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call DecodePlugin::destroy()
DecodePlugin* obj = new DecodePlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
5ea3f19d8ada5e768a5b3aa94a62baecc40a3d7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
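// Antialiasing sketch for the TODO above (not wired in; the half-pixel jitter is a common
// choice, not mandated here): perturb the pixel coordinate by a sub-pixel offset before
// building the direction, e.g.
//   thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth);
//   thrust::uniform_real_distribution<float> u01(0, 1);
//   float jx = (float)x + u01(rng) - 0.5f;
//   float jy = (float)y + u01(rng) - 0.5f;
// and then use jx / jy in place of (float)x / (float)y inside the glm::normalize(...) call,
// so each iteration samples a slightly different point within the pixel.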
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
float lightTerm = glm::dot(intersection.surfaceNormal, -pathSegments[idx].ray.direction);
//pathSegments[idx].color *= (materialColor * lightTerm);
pathSegments[idx].color *= (materialColor);
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + intersection.t * pathSegments[idx].ray.direction,
intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
struct is_path_end {
__host__ __device__
bool operator()(const PathSegment& path) {
return (path.remainingBounces > 0);
}
};
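// Compaction note: despite its name, this predicate selects paths that still have bounces
// left. pathtrace() below uses it with thrust::partition rather than thrust::remove_if,
// because terminated segments must stay resident -- finalGather later walks all pixelcount
// PathSegments and accumulates their colors, and partition only reorders (live paths first)
// while remove_if would leave the tail of the range in an unspecified state.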
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
depth++;
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
hipLaunchKernelGGL(( shadeFakeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
hipDeviceSynchronize();
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_path_end());
num_paths = dev_path_end - dev_paths;
if (num_paths <= 0) {
iterationComplete = true; // TODO: should be based off stream compaction results.
}
else {
}
//iterationComplete = true;
}
num_paths = pixelcount;
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
| 5ea3f19d8ada5e768a5b3aa94a62baecc40a3d7e.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/partition.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
        // Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
static Scene * hst_scene = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
  // TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
// TODO: implement antialiasing by jittering the ray
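        // A minimal sketch (not part of the original code): stochastic antialiasing could
        // jitter the sample position inside the pixel footprint, e.g.
        //   thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, traceDepth);
        //   thrust::uniform_real_distribution<float> u01(0, 1);
        //   float jx = u01(rng) - 0.5f, jy = u01(rng) - 0.5f;
        // and then use ((float)x + jx) and ((float)y + jy) in the direction computed below.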
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
// TODO:
// computeIntersections handles generating ray intersections ONLY.
// Generating new rays is handled in your shader(s).
// Feel free to modify the code below.
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// TODO: add more intersection tests here... triangle? metaball? CSG?
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
// LOOK: "fake" shader demonstrating what you might do with the info in
// a ShadeableIntersection, as well as how to use thrust's random number
// generator. Observe that since the thrust random number generator basically
// adds "noise" to the iteration, the image should start off noisy and get
// cleaner as more iterations are computed.
//
// Note that this shader does NOT do a BSDF evaluation!
// Your shaders should handle that - this can allow techniques such as
// bump mapping.
__global__ void shadeFakeMaterial (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
if (intersection.t > 0.0f) { // if the intersection exists...
// Set up the RNG
// LOOK: this is how you use thrust's RNG! Please look at
// makeSeededRandomEngine as well.
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0);
thrust::uniform_real_distribution<float> u01(0, 1);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
pathSegments[idx].color *= (materialColor * material.emittance);
pathSegments[idx].remainingBounces = 0;
}
// Otherwise, do some pseudo-lighting computation. This is actually more
// like what you would expect from shading in a rasterizer like OpenGL.
// TODO: replace this! you should be able to start with basically a one-liner
else {
//float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f));
//pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f;
float lightTerm = glm::dot(intersection.surfaceNormal, -pathSegments[idx].ray.direction);
//pathSegments[idx].color *= (materialColor * lightTerm);
pathSegments[idx].color *= (materialColor);
//pathSegments[idx].color *= u01(rng); // apply some noise because why not
scatterRay(pathSegments[idx], pathSegments[idx].ray.origin + intersection.t * pathSegments[idx].ray.direction,
intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
pathSegments[idx].color = glm::vec3(0.0f);
pathSegments[idx].remainingBounces = 0;
}
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
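// Predicate used with thrust::partition below: despite the name, it returns true for
// paths that are still ALIVE (remainingBounces > 0), so partition moves active
// segments to the front of dev_paths and the partition point gives the new num_paths.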
struct is_path_end {
__host__ __device__
bool operator()(const PathSegment& path) {
return (path.remainingBounces > 0);
}
};
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(uchar4 *pbo, int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * TODO: Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * TODO: Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally, add this iteration's results to the image. This has been done
// for you.
// TODO: perform one iteration of path tracing
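    // A hedged sketch (not from the original code) of the thrust-based compaction the
    // recap above refers to; remove_if would shrink the active range in place given a
    // hypothetical predicate that flags terminated paths:
    //   struct is_terminated {
    //     __host__ __device__ bool operator()(const PathSegment& p) {
    //       return p.remainingBounces <= 0;
    //     }
    //   };
    //   dev_path_end = thrust::remove_if(thrust::device, dev_paths,
    //                                    dev_paths + num_paths, is_terminated());
    // The loop below uses thrust::partition instead, which keeps terminated segments
    // (and their accumulated colors) at the back of the buffer for the final gather.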
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
bool iterationComplete = false;
while (!iterationComplete) {
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
depth++;
// TODO:
// --- Shading Stage ---
// Shade path segments based on intersections and generate new rays by
// evaluating the BSDF.
// Start off with just a big kernel that handles all the different
// materials you have in the scenefile.
// TODO: compare between directly shading the path segments and shading
// path segments that have been reshuffled to be contiguous in memory.
shadeFakeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
cudaDeviceSynchronize();
dev_path_end = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, is_path_end());
num_paths = dev_path_end - dev_paths;
if (num_paths <= 0) {
iterationComplete = true; // TODO: should be based off stream compaction results.
}
//iterationComplete = true;
}
num_paths = pixelcount;
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
|
c524730f8cf0b8ea6d85f811d29b24ae1563b3c1.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = phi::DenseTensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const phi::DenseTensor* x = ctx.Input<phi::DenseTensor>("X");
const phi::DenseTensor* y = ctx.Input<phi::DenseTensor>("Y");
const phi::DenseTensor* bias = ctx.Input<phi::DenseTensor>("Bias");
phi::DenseTensor* out = ctx.Output<phi::DenseTensor>("Out");
phi::DenseTensor* reserve_space =
ctx.Output<phi::DenseTensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
bool enable_auxiliary = reserve_space == nullptr ? false : true;
dev_ctx.Alloc<T>(out, out->numel() * sizeof(T));
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, platform::bfloat16>::value) {
      mat_type = HIP_R_16BF;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
hipblasOperation_t transx = trans_x ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t transy = trans_y ? HIPBLAS_OP_T : HIPBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
if (activation == "relu") {
        // The ReLU auxiliary output is a bitmask (one bit per element), so the byte count is numel / 8.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
dev_ctx.Alloc(reserve_space, out->type(), reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
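    // cuBLASLt assumes column-major storage. The row-major Out = op(X) * op(Y) is
    // computed as the column-major op(Y)^T * op(X)^T: the layouts below are created
    // with swapped row/column counts, y is passed as matrix A and x as matrix B to
    // cublasLtMatmul, and TRANSA/TRANSB were set from trans_y/trans_x above.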
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
hipStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
operation_desc,
y_desc,
x_desc,
out_desc,
alpha,
beta,
y_data,
x_data,
out_data,
stream,
workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
operation_desc,
alpha,
y_data,
y_desc,
x_data,
x_desc,
beta,
out_data,
out_desc,
out_data,
out_desc,
algo,
workspace->ptr(),
workspace_size,
stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
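// Gradient bookkeeping for Out = op(X) * op(Y) with upstream gradient dOut (= kDZ).
// Each trait records which two operands and transposes produce dX and dY; e.g. with
// no transposes, dX = dOut * Y^T and dY = X^T * dOut, which is exactly what
// FusedGEMMGradTrait<false, false> below encodes.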
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? HIPBLAS_OP_T : HIPBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const phi::DenseTensor* dout = ctx.Input<phi::DenseTensor>("DOut");
const phi::DenseTensor* x = ctx.Input<phi::DenseTensor>("X");
const phi::DenseTensor* y = ctx.Input<phi::DenseTensor>("Y");
const phi::DenseTensor* reserve_space =
ctx.Input<phi::DenseTensor>("ReserveSpace");
phi::DenseTensor* dx = ctx.Output<phi::DenseTensor>("DX");
phi::DenseTensor* dy = ctx.Output<phi::DenseTensor>("DY");
phi::DenseTensor* dbias = ctx.Output<phi::DenseTensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
hipDataType mat_type = HIP_R_32F;
hipDataType scale_type = HIP_R_32F;
hipblasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = HIP_R_16F;
}
if (std::is_same<T, platform::bfloat16>::value) {
      mat_type = HIP_R_16BF;
}
if (std::is_same<T, double>::value) {
mat_type = HIP_R_64F;
scale_type = HIP_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
hipStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc,
dout_trans_desc,
x_desc,
x_trans_desc,
y_desc,
y_trans_desc,
dx_desc,
dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx,
sizeof(epiloque_func_for_dx)));
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dx_data = dev_ctx.Alloc<T>(dx, dx->numel() * sizeof(T));
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dx_operation_desc,
b_desc,
a_desc,
dx_desc,
alpha,
beta,
b_data,
a_data,
dx_data,
stream,
dx_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dx_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dx_data,
dx_desc,
dx_data,
dx_desc,
algo,
dx_workspace->ptr(),
workspace_size,
stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy,
sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dev_ctx.Alloc<T>(dbias, dbias->numel() * sizeof(T));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data,
sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dy_data = dev_ctx.Alloc<T>(dy, dy->numel() * sizeof(T));
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dy_operation_desc,
b_desc,
a_desc,
dy_desc,
alpha,
beta,
b_data,
a_data,
dy_data,
stream,
dy_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dy_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dy_data,
dy_desc,
dy_data,
dy_desc,
algo,
dy_workspace->ptr(),
workspace_size,
stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if TORCH_HIP_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::float16>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::bfloat16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
paddle::platform::float16>,
    ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
                                     paddle::platform::bfloat16>);
#endif
| c524730f8cf0b8ea6d85f811d29b24ae1563b3c1.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/scope_guard.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = phi::DenseTensor;
template <typename DeviceContext, typename T>
class FusedGemmEpilogueKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const phi::DenseTensor* x = ctx.Input<phi::DenseTensor>("X");
const phi::DenseTensor* y = ctx.Input<phi::DenseTensor>("Y");
const phi::DenseTensor* bias = ctx.Input<phi::DenseTensor>("Bias");
phi::DenseTensor* out = ctx.Output<phi::DenseTensor>("Out");
phi::DenseTensor* reserve_space =
ctx.Output<phi::DenseTensor>("ReserveSpace");
bool trans_x = ctx.Attr<bool>("trans_x");
bool trans_y = ctx.Attr<bool>("trans_y");
std::string activation = ctx.Attr<std::string>("activation");
VLOG(10) << "trans_x = " << trans_x << " , trans_y = " << trans_y
<< " , activation = " << activation;
bool enable_auxiliary = reserve_space == nullptr ? false : true;
dev_ctx.Alloc<T>(out, out->numel() * sizeof(T));
auto* out_data = out->data<T>();
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), trans_x ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = trans_x ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = trans_y ? y->dims()[1] : y->dims()[0];
int64_t N = trans_y ? y->dims()[0] : y->dims()[1];
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, platform::bfloat16>::value) {
mat_type = CUDA_R_16BF;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtMatmulDesc_t operation_desc = NULL;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&operation_desc, compute_type, scale_type));
cublasOperation_t transx = trans_x ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t transy = trans_y ? CUBLAS_OP_T : CUBLAS_OP_N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&transx,
sizeof(transx)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&transy,
sizeof(transy)));
cublasLtEpilogue_t epiloque_func =
get_epilogue_type_(activation, enable_auxiliary);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func,
sizeof(epiloque_func)));
const T* bias_data = bias->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&bias_data,
sizeof(bias_data)));
if (enable_auxiliary && activation != "none") {
size_t reserve_space_size = 0;
if (activation == "relu") {
        // The ReLU auxiliary output is a bitmask (one bit per element), so the byte count is numel / 8.
reserve_space_size = phi::product(out->dims()) / 8;
} else {
reserve_space_size = phi::product(out->dims()) * sizeof(T);
}
dev_ctx.Alloc(reserve_space, out->type(), reserve_space_size);
void* aux_data = reinterpret_cast<void*>(reserve_space->data<T>());
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = N;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
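    // Note on layout order: cuBLASLt is column-major, so the row-major Out = op(X) * op(Y)
    // is evaluated as op(Y)^T * op(X)^T. Hence the swapped dimensions in the layouts below,
    // with y used as matrix A and x as matrix B in the cublasLtMatmul call (TRANSA/TRANSB
    // already set from trans_y/trans_x).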
cublasLtMatrixLayout_t x_desc = NULL, y_desc = NULL, out_desc = NULL;
if (trans_x)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, M, K, M));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&x_desc, mat_type, K, M, K));
if (trans_y)
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, K, N, K));
else
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&y_desc, mat_type, N, K, N));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&out_desc, mat_type, N, M, N));
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
cudaStream_t stream = dev_ctx.stream();
memory::allocation::AllocationPtr workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
const auto* y_data = y->data<T>();
const auto* x_data = x->data<T>();
auto algo = GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
operation_desc,
y_desc,
x_desc,
out_desc,
alpha,
beta,
y_data,
x_data,
out_data,
stream,
workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
operation_desc,
alpha,
y_data,
y_desc,
x_data,
x_desc,
beta,
out_data,
out_desc,
out_data,
out_desc,
algo,
workspace->ptr(),
workspace_size,
stream));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(operation_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(y_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(x_desc));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(out_desc));
}
private:
static cublasLtEpilogue_t get_epilogue_type_(const std::string& activation,
bool enable_auxiliary) {
if (activation == "relu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_RELU_AUX_BIAS
: CUBLASLT_EPILOGUE_RELU_BIAS;
} else if (activation == "gelu") {
return enable_auxiliary ? CUBLASLT_EPILOGUE_GELU_AUX_BIAS
: CUBLASLT_EPILOGUE_GELU_BIAS;
} else if (activation == "none") {
return CUBLASLT_EPILOGUE_BIAS;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation attribute of fused_gemm_epilogue op should be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation=%s.",
activation));
}
}
};
enum FusedGEMMGradInType { kDX = 0, kDY = 1, kDZ = 2 };
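// The traits below encode the matmul gradients of Out = op(X) * op(Y): in the
// non-transposed case, dX = dOut * Y^T and dY = X^T * dOut (dOut is kDZ), and the
// remaining specializations cover the transposed variants.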
template <bool TransX, bool TransY>
struct FusedGEMMGradTrait;
template <>
struct FusedGEMMGradTrait<false, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, false> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDX;
static constexpr auto kYGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradATrans = false;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<false, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradB = FusedGEMMGradInType::kDY;
static constexpr auto kXGradATrans = false;
static constexpr auto kXGradBTrans = false;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = false;
};
template <>
struct FusedGEMMGradTrait<true, true> {
static constexpr auto kXGradA = FusedGEMMGradInType::kDY;
static constexpr auto kXGradB = FusedGEMMGradInType::kDZ;
static constexpr auto kXGradATrans = true;
static constexpr auto kXGradBTrans = true;
static constexpr auto kYGradA = FusedGEMMGradInType::kDZ;
static constexpr auto kYGradB = FusedGEMMGradInType::kDX;
static constexpr auto kYGradATrans = true;
static constexpr auto kYGradBTrans = true;
};
static constexpr auto BoolToCuBlasEnum(bool transpose) {
return transpose ? CUBLAS_OP_T : CUBLAS_OP_N;
}
template <typename DeviceContext, typename T>
class FusedGemmEpilogueGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
bool transpose_x = ctx.Attr<bool>("trans_x");
bool transpose_y = ctx.Attr<bool>("trans_y");
if (transpose_x) {
if (transpose_y) {
ComputeImpl<true, true>(ctx);
} else {
ComputeImpl<true, false>(ctx);
}
} else {
if (transpose_y) {
ComputeImpl<false, true>(ctx);
} else {
ComputeImpl<false, false>(ctx);
}
}
}
private:
template <bool TransX, bool TransY>
static void ComputeImpl(const framework::ExecutionContext& ctx) {
using Trait = FusedGEMMGradTrait<TransX, TransY>;
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
const phi::DenseTensor* dout = ctx.Input<phi::DenseTensor>("DOut");
const phi::DenseTensor* x = ctx.Input<phi::DenseTensor>("X");
const phi::DenseTensor* y = ctx.Input<phi::DenseTensor>("Y");
const phi::DenseTensor* reserve_space =
ctx.Input<phi::DenseTensor>("ReserveSpace");
phi::DenseTensor* dx = ctx.Output<phi::DenseTensor>("DX");
phi::DenseTensor* dy = ctx.Output<phi::DenseTensor>("DY");
phi::DenseTensor* dbias = ctx.Output<phi::DenseTensor>("DBias");
std::string activation_grad = ctx.Attr<std::string>("activation_grad");
VLOG(10) << "trans_x = " << TransX << " , trans_y = " << TransY
<< " , activation_grad = " << activation_grad;
auto x_mat_dims =
phi::flatten_to_2d(x->dims(), TransX ? 1 : x->dims().size() - 1);
// (M * K) * (K * N)
int64_t M = TransX ? x_mat_dims[1] : x_mat_dims[0];
int64_t K = TransY ? y->dims()[1] : y->dims()[0];
int64_t N = TransY ? y->dims()[0] : y->dims()[1];
VLOG(10) << "M = " << M << " , K = " << K << " , N = " << N;
cudaDataType_t mat_type = CUDA_R_32F;
cudaDataType_t scale_type = CUDA_R_32F;
cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;
if (std::is_same<T, paddle::platform::float16>::value) {
mat_type = CUDA_R_16F;
}
if (std::is_same<T, platform::bfloat16>::value) {
mat_type = CUDA_R_16BF;
}
if (std::is_same<T, double>::value) {
mat_type = CUDA_R_64F;
scale_type = CUDA_R_64F;
compute_type = CUBLAS_COMPUTE_64F;
}
cublasLtHandle_t lt_handle = dev_ctx.cublaslt_handle();
// NOTE(zengjinle): I do not know whether the 4MB workspace size is
// "enough". I just followed the settings from the NVIDIA MLPerf BERT code.
size_t workspace_size = static_cast<size_t>(4) * 1024 * 1024;
const cublasLtMatmulAlgo_t* algo = nullptr;
cudaStream_t stream = dev_ctx.stream();
double alpha64 = 1.0, beta64 = 0.0;
float alpha32 = 1.0f, beta32 = 0.0f;
void *alpha = nullptr, *beta = nullptr;
if (std::is_same<T, double>::value) {
alpha = &alpha64;
beta = &beta64;
} else {
alpha = &alpha32;
beta = &beta32;
}
cublasLtMatrixLayout_t dout_desc = nullptr, dout_trans_desc = nullptr;
cublasLtMatrixLayout_t x_desc = nullptr, x_trans_desc = nullptr;
cublasLtMatrixLayout_t y_desc = nullptr, y_trans_desc = nullptr;
cublasLtMatrixLayout_t dx_desc = nullptr, dy_desc = nullptr;
cublasLtMatmulDesc_t dx_operation_desc = nullptr,
dy_operation_desc = nullptr;
DEFINE_PADDLE_SCOPE_GUARD([&] {
auto descs = {dout_desc,
dout_trans_desc,
x_desc,
x_trans_desc,
y_desc,
y_trans_desc,
dx_desc,
dy_desc};
for (auto desc : descs) {
if (desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutDestroy(desc));
}
}
if (dx_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dx_operation_desc));
}
if (dy_operation_desc) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescDestroy(dy_operation_desc));
}
});
auto x_row = TransX ? K : M;
auto x_col = TransX ? M : K;
auto y_row = TransY ? N : K;
auto y_col = TransY ? K : N;
auto z_row = TransX ? N : M;
auto z_col = TransX ? M : N;
// dx = func(dout, y)
if (dx) {
constexpr auto kXGradAIsDZ = (Trait::kXGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dx_dout_desc, *dx_y_desc;
if (TransX) {
dx_dout_desc = &dout_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_row, z_col, z_row));
} else {
dx_dout_desc = &dout_desc;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dx_dout_desc, mat_type, z_col, z_row, z_col));
}
dx_y_desc = &y_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dx_y_desc, mat_type, y_col, y_row, y_col));
auto& a_desc = kXGradAIsDZ ? (*dx_dout_desc) : (*dx_y_desc);
auto& b_desc = kXGradAIsDZ ? (*dx_y_desc) : (*dx_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kXGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kXGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dx_desc, mat_type, x_col, x_row, x_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dx_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dx =
get_epilogue_type_(activation_grad);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dx,
sizeof(epiloque_func_for_dx)));
if (activation_grad != "none") {
auto* aux_data = reserve_space->data<T>();
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER,
&aux_data,
sizeof(aux_data)));
int64_t aux_ld = TransX ? M : K;
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dx_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD,
&aux_ld,
sizeof(aux_ld)));
}
auto dx_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dx_data = dev_ctx.Alloc<T>(dx, dx->numel() * sizeof(T));
const auto* y_data = y->data<T>();
const auto* dout_data = dout->data<T>();
const auto* a_data = kXGradAIsDZ ? dout_data : y_data;
const auto* b_data = kXGradAIsDZ ? y_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dx_operation_desc,
b_desc,
a_desc,
dx_desc,
alpha,
beta,
b_data,
a_data,
dx_data,
stream,
dx_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dx_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dx_data,
dx_desc,
dx_data,
dx_desc,
algo,
dx_workspace->ptr(),
workspace_size,
stream));
}
// dy = func(dout, x)
if (dy) {
constexpr auto kYGradAIsDZ = (Trait::kYGradA == FusedGEMMGradInType::kDZ);
cublasLtMatrixLayout_t *dy_dout_desc = nullptr, *dy_x_desc = nullptr;
if (TransX) {
dy_dout_desc = &dout_trans_desc;
if (dout_trans_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_row, z_col, z_row));
}
} else {
dy_dout_desc = &dout_desc;
if (dout_desc == nullptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatrixLayoutCreate(
dy_dout_desc, mat_type, z_col, z_row, z_col));
}
}
dy_x_desc = &x_trans_desc;
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
dy_x_desc, mat_type, x_col, x_row, x_col));
auto& a_desc = kYGradAIsDZ ? (*dy_dout_desc) : (*dy_x_desc);
auto& b_desc = kYGradAIsDZ ? (*dy_x_desc) : (*dy_dout_desc);
auto a_trans = BoolToCuBlasEnum(Trait::kYGradATrans);
auto b_trans = BoolToCuBlasEnum(Trait::kYGradBTrans);
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatrixLayoutCreate(
&dy_desc, mat_type, y_col, y_row, y_col));
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cublasLtMatmulDescCreate(
&dy_operation_desc, compute_type, scale_type));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSB,
&a_trans,
sizeof(a_trans)));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_TRANSA,
&b_trans,
sizeof(b_trans)));
cublasLtEpilogue_t epiloque_func_for_dy;
if (dbias == nullptr) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_DEFAULT;
} else {
if (TransY) {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADB;
} else {
epiloque_func_for_dy = CUBLASLT_EPILOGUE_BGRADA;
}
}
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_EPILOGUE,
&epiloque_func_for_dy,
sizeof(epiloque_func_for_dy)));
if (dbias) {
auto* dbias_data = dev_ctx.Alloc<T>(dbias, dbias->numel() * sizeof(T));
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmulDescSetAttribute(
dy_operation_desc,
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
&dbias_data,
sizeof(dbias_data)));
}
auto dy_workspace = memory::Alloc(
dev_ctx.GetPlace(),
workspace_size,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
auto* dy_data = dev_ctx.Alloc<T>(dy, dy->numel() * sizeof(T));
const auto* dout_data = dout->data<T>();
const auto* x_data = x->data<T>();
const auto* a_data = kYGradAIsDZ ? dout_data : x_data;
const auto* b_data = kYGradAIsDZ ? x_data : dout_data;
auto algo =
GemmEpilogueAlgoCache::Instance().GetGemmAlgo(lt_handle,
dy_operation_desc,
b_desc,
a_desc,
dy_desc,
alpha,
beta,
b_data,
a_data,
dy_data,
stream,
dy_workspace->ptr(),
workspace_size);
PADDLE_ENFORCE_GPU_SUCCESS(
platform::dynload::cublasLtMatmul(lt_handle,
dy_operation_desc,
alpha,
b_data,
b_desc,
a_data,
a_desc,
beta,
dy_data,
dy_desc,
dy_data,
dy_desc,
algo,
dy_workspace->ptr(),
workspace_size,
stream));
}
}
private:
static cublasLtEpilogue_t get_epilogue_type_(
const std::string& activation_grad) {
if (activation_grad == "relu_grad") {
return CUBLASLT_EPILOGUE_DRELU;
} else if (activation_grad == "gelu_grad") {
return CUBLASLT_EPILOGUE_DGELU;
} else if (activation_grad == "none") {
return CUBLASLT_EPILOGUE_DEFAULT;
} else {
PADDLE_ENFORCE_EQ(
true,
false,
platform::errors::InvalidArgument(
"The activation_grad attribute of fused_gemm_epilogue op should "
"be"
" one of {\"none\", \"relu\", \"gelu\"}. But received %s."
"But received activation_grad=%s.",
activation_grad));
}
}
};
} // namespace operators
} // namespace paddle
#if CUDA_VERSION >= 11060
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue,
ops::FusedGemmEpilogueKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::float16>,
ops::FusedGemmEpilogueKernel<phi::GPUContext, paddle::platform::bfloat16>);
REGISTER_OP_CUDA_KERNEL(
fused_gemm_epilogue_grad,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, float>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext, double>,
ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
paddle::platform::float16>,
    ops::FusedGemmEpilogueGradKernel<phi::GPUContext,
                                     paddle::platform::bfloat16>);
#endif
|
6009f5ed951a08b3f825fec8f5fe0c06b8dfda39.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "si.h"
#include "wt.h"
# define CUDACHECK \
{ hipDeviceSynchronize(); \
hipError_t last = hipGetLastError();\
if(last!=hipSuccess) {\
printf("ERRORX: %s %s %i \n", hipGetErrorString( last), __FILE__, __LINE__ ); \
exit(1);\
}\
}
// FIXME: temp. workaround
#define MAX_FILTER_WIDTH 40
/// Constructor : default
SparseInpainting::SparseInpainting(void) : d_image(NULL), d_coeffs(NULL), d_tmp(NULL)
{
}
/// Constructor : SparseInpainting from image
SparseInpainting::SparseInpainting(
DTYPE* img_r,
int Nr,
int Nc,
const char* wname,
int levels) :
d_image(NULL),
d_coeffs(NULL),
d_tmp(NULL),
state(W_INIT)
{
winfos.Nr = Nr;
winfos.Nc = Nc;
winfos.nlevels = levels;
if (levels < 1) {
puts("Warning: cannot initialize wavelet coefficients with nlevels < 1. Forcing nlevels = 1");
winfos.nlevels = 1;
}
hipMemcpyKind transfer;
transfer = hipMemcpyDeviceToDevice;
// Image
DTYPE* d_arr_in;
hipMalloc(&d_arr_in, Nr*Nc*sizeof(DTYPE));
    if (!img_r) hipMemset(d_arr_in, 0, Nr*Nc*sizeof(DTYPE));
    else hipMemcpy(d_arr_in, img_r, Nr*Nc*sizeof(DTYPE), transfer);
d_image = d_arr_in;
DTYPE* d_tmp_new;
hipMalloc(&d_tmp_new, 2*Nr*Nc*sizeof(DTYPE)); // Two temp. images
d_tmp = d_tmp_new;
hipMemset(d_tmp, 0, 2*Nr*Nc*sizeof(DTYPE));
// Filters
strncpy(this->wname, wname, 128);
int hlen = 0;
hlen = w_compute_filters_separable(wname);
if (hlen == 0) {
printf("ERROR: unknown wavelet name %s\n", wname);
//~ exit(1);
state = W_CREATION_ERROR;
}
winfos.hlen = hlen;
// Compute max achievable level according to image dimensions and filter size
int N;
N = Nc;
int wmaxlev = w_ilog2(N/hlen);
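    // Assuming w_ilog2 is floor(log2(.)): e.g. a 512-wide image with an 8-tap filter
    // (hlen = 8) allows wmaxlev = floor(log2(512 / 8)) = 6 decomposition levels.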
// TODO: remove this limitation
if (levels > wmaxlev) {
printf("Warning: required level (%d) is greater than the maximum possible level for %s (%d) on a %dx%d image.\n", winfos.nlevels, wname, wmaxlev, winfos.Nc, winfos.Nr);
printf("Forcing nlevels = %d\n", wmaxlev);
winfos.nlevels = wmaxlev;
}
// Allocate coeffs
DTYPE** d_coeffs_new;
d_coeffs_new = w_create_coeffs_buffer(winfos);
d_coeffs = d_coeffs_new;
}
/// Constructor: copy
SparseInpainting::SparseInpainting(const SparseInpainting &W) :
state(W.state)
{
winfos.Nr = W.winfos.Nr;
winfos.Nc = W.winfos.Nc;
winfos.nlevels = W.winfos.nlevels;
winfos.hlen = W.winfos.hlen;
strncpy(wname, W.wname, 128);
hipMalloc(&d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE));
hipMemcpy(d_image, W.d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
hipMalloc(&d_tmp, 2*winfos.Nr*winfos.Nc*sizeof(DTYPE));
d_coeffs = w_create_coeffs_buffer(winfos);
w_copy_coeffs_buffer(d_coeffs, W.d_coeffs, winfos);
}
/// Destructor
SparseInpainting::~SparseInpainting(void) {
if (d_image) hipFree(d_image);
if (d_coeffs) w_free_coeffs_buffer(d_coeffs, winfos.nlevels);
if (d_tmp) hipFree(d_tmp);
}
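/// Method : projection onto the inpainting data constraint. The kernel img_projc is
/// defined elsewhere; presumably it re-imposes the observed samples y on d_image
/// wherever the mask Omega marks a known pixel.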
void SparseInpainting::projc(DTYPE* Omega, DTYPE* y) {
img_projc(d_image, Omega, y, winfos.Nr, winfos.Nc);
}
/// Method : forward
void SparseInpainting::forward(void) {
if (state == W_CREATION_ERROR) {
puts("Warning: forward transform not computed, as there was an error when creating the wavelets");
return;
}
w_forward_swt_separable(d_image, d_coeffs, d_tmp, winfos);
// else: not implemented yet
state = W_FORWARD;
}
/// Method : inverse
void SparseInpainting::inverse(void) {
if (state == W_INVERSE) { // TODO: what to do in this case ? Force re-compute, or abort ?
puts("Warning: W.inverse() has already been run. Inverse is available in W.get_image()");
return;
}
if (state == W_FORWARD_ERROR || state == W_THRESHOLD_ERROR) {
puts("Warning: inverse transform not computed, as there was an error in a previous stage");
return;
}
w_inverse_swt_separable(d_image, d_coeffs, d_tmp, winfos);
state = W_INVERSE;
}
/// Method : soft thresholding (L1 proximal)
void SparseInpainting::soft_threshold(DTYPE beta) {
if (state == W_INVERSE) {
puts("Warning: SparseInpainting(): cannot threshold coefficients, as they were modified by W.inverse()");
return;
}
w_call_soft_thresh(d_coeffs, beta, winfos);
}
/// Method : get the image from device to host
int SparseInpainting::get_image(DTYPE* img_r) { // TODO: more defensive
hipMemcpy(img_r, d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), hipMemcpyDeviceToHost);
return winfos.Nr*winfos.Nc;
}
/// Method : get the image from device to device
int SparseInpainting::get_image_d(DTYPE* d_img_r) { // TODO: more defensive
hipMemcpy(d_img_r, d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
return winfos.Nr*winfos.Nc;
}
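// ---------------------------------------------------------------------------
// Usage sketch (appended for illustration; not part of the original file).
// One possible ISTA-style inpainting loop built on the methods above. The
// wavelet name "haar", the level count, the iteration count and the threshold
// value are assumptions; d_img, d_Omega and d_y are device buffers of size
// Nr*Nc provided by the caller.
static inline void example_sparse_inpainting(DTYPE* d_img, DTYPE* d_Omega,
                                             DTYPE* d_y, int Nr, int Nc) {
    SparseInpainting W(d_img, Nr, Nc, "haar", 3);
    for (int it = 0; it < 50; ++it) {
        W.forward();            // image -> stationary wavelet coefficients
        W.soft_threshold(0.1f); // L1 proximal step on the coefficients
        W.inverse();            // coefficients -> image
        W.projc(d_Omega, d_y);  // re-impose the known pixels
    }
    W.get_image_d(d_img);       // read back the inpainted image (device copy)
}
// ---------------------------------------------------------------------------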
| 6009f5ed951a08b3f825fec8f5fe0c06b8dfda39.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "common.h"
#include "si.h"
#include "wt.h"
# define CUDACHECK \
{ cudaThreadSynchronize(); \
cudaError_t last = cudaGetLastError();\
if(last!=cudaSuccess) {\
printf("ERRORX: %s %s %i \n", cudaGetErrorString( last), __FILE__, __LINE__ ); \
exit(1);\
}\
}
// FIXME: temp. workaround
#define MAX_FILTER_WIDTH 40
/// Constructor : default
SparseInpainting::SparseInpainting(void) : d_image(NULL), d_coeffs(NULL), d_tmp(NULL)
{
}
/// Constructor : SparseInpainting from image
SparseInpainting::SparseInpainting(
DTYPE* img_r,
int Nr,
int Nc,
const char* wname,
int levels) :
d_image(NULL),
d_coeffs(NULL),
d_tmp(NULL),
state(W_INIT)
{
winfos.Nr = Nr;
winfos.Nc = Nc;
winfos.nlevels = levels;
if (levels < 1) {
puts("Warning: cannot initialize wavelet coefficients with nlevels < 1. Forcing nlevels = 1");
winfos.nlevels = 1;
}
cudaMemcpyKind transfer;
transfer = cudaMemcpyDeviceToDevice;
// Image
DTYPE* d_arr_in;
cudaMalloc(&d_arr_in, Nr*Nc*sizeof(DTYPE));
    if (!img_r) cudaMemset(d_arr_in, 0, Nr*Nc*sizeof(DTYPE));
    else cudaMemcpy(d_arr_in, img_r, Nr*Nc*sizeof(DTYPE), transfer); // do not copy from a null host pointer
d_image = d_arr_in;
DTYPE* d_tmp_new;
cudaMalloc(&d_tmp_new, 2*Nr*Nc*sizeof(DTYPE)); // Two temp. images
d_tmp = d_tmp_new;
cudaMemset(d_tmp, 0, 2*Nr*Nc*sizeof(DTYPE));
// Filters
strncpy(this->wname, wname, 128);
int hlen = 0;
hlen = w_compute_filters_separable(wname);
if (hlen == 0) {
printf("ERROR: unknown wavelet name %s\n", wname);
//~ exit(1);
state = W_CREATION_ERROR;
}
winfos.hlen = hlen;
// Compute max achievable level according to image dimensions and filter size
int N;
N = Nc;
int wmaxlev = w_ilog2(N/hlen);
// TODO: remove this limitation
if (levels > wmaxlev) {
printf("Warning: required level (%d) is greater than the maximum possible level for %s (%d) on a %dx%d image.\n", winfos.nlevels, wname, wmaxlev, winfos.Nc, winfos.Nr);
printf("Forcing nlevels = %d\n", wmaxlev);
winfos.nlevels = wmaxlev;
}
// Allocate coeffs
DTYPE** d_coeffs_new;
d_coeffs_new = w_create_coeffs_buffer(winfos);
d_coeffs = d_coeffs_new;
}
/// Constructor: copy
SparseInpainting::SparseInpainting(const SparseInpainting &W) :
state(W.state)
{
winfos.Nr = W.winfos.Nr;
winfos.Nc = W.winfos.Nc;
winfos.nlevels = W.winfos.nlevels;
winfos.hlen = W.winfos.hlen;
strncpy(wname, W.wname, 128);
cudaMalloc(&d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE));
cudaMemcpy(d_image, W.d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
cudaMalloc(&d_tmp, 2*winfos.Nr*winfos.Nc*sizeof(DTYPE));
d_coeffs = w_create_coeffs_buffer(winfos);
w_copy_coeffs_buffer(d_coeffs, W.d_coeffs, winfos);
}
/// Destructor
SparseInpainting::~SparseInpainting(void) {
if (d_image) cudaFree(d_image);
if (d_coeffs) w_free_coeffs_buffer(d_coeffs, winfos.nlevels);
if (d_tmp) cudaFree(d_tmp);
}
void SparseInpainting::projc(DTYPE* Omega, DTYPE* y) {
img_projc(d_image, Omega, y, winfos.Nr, winfos.Nc);
}
/// Method : forward
void SparseInpainting::forward(void) {
if (state == W_CREATION_ERROR) {
puts("Warning: forward transform not computed, as there was an error when creating the wavelets");
return;
}
w_forward_swt_separable(d_image, d_coeffs, d_tmp, winfos);
// else: not implemented yet
state = W_FORWARD;
}
/// Method : inverse
void SparseInpainting::inverse(void) {
if (state == W_INVERSE) { // TODO: what to do in this case ? Force re-compute, or abort ?
puts("Warning: W.inverse() has already been run. Inverse is available in W.get_image()");
return;
}
if (state == W_FORWARD_ERROR || state == W_THRESHOLD_ERROR) {
puts("Warning: inverse transform not computed, as there was an error in a previous stage");
return;
}
w_inverse_swt_separable(d_image, d_coeffs, d_tmp, winfos);
state = W_INVERSE;
}
/// Method : soft thresholding (L1 proximal)
void SparseInpainting::soft_threshold(DTYPE beta) {
if (state == W_INVERSE) {
puts("Warning: SparseInpainting(): cannot threshold coefficients, as they were modified by W.inverse()");
return;
}
w_call_soft_thresh(d_coeffs, beta, winfos);
}
/// Method : get the image from device to host
int SparseInpainting::get_image(DTYPE* img_r) { // TODO: more defensive
cudaMemcpy(img_r, d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), cudaMemcpyDeviceToHost);
return winfos.Nr*winfos.Nc;
}
/// Method : get the image from device to device
int SparseInpainting::get_image_d(DTYPE* d_img_r) { // TODO: more defensive
cudaMemcpy(d_img_r, d_image, winfos.Nr*winfos.Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
return winfos.Nr*winfos.Nc;
}
|
417c6f1e445a00c88d5d526175a4e9708603b841.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
namespace details {
template <typename T>
struct Add {
__device__ T operator()(const T &a, const T &b) const { return a + b; }
};
template <typename T>
struct Mul {
__device__ T operator()(const T &a, const T &b) const { return a * b; }
};
} // namespace details
template <typename T, typename Operator>
__global__ void elementwise_kernel(const size_t total, const T *x_data,
const T *y_data, T *out_data, int pre, int n,
int post, Operator op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
int idx = tid / post % n;
#if __CUDA_ARCH__ >= 350
out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx));
#else
out_data[tid] = op(x_data[tid], y_data[idx]);
#endif
}
}
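// Index-mapping example (assumed shapes, illustration only): with pre = 2,
// n = 3, post = 4 and total = 24 (e.g. X of shape (2, 3, 4) broadcast-added
// with Y of shape (3,) at axis = 1), the thread with tid = 17 reads
// x_data[17] and y_data[(17 / 4) % 3] = y_data[1], i.e. Y is broadcast across
// the leading and trailing dimensions of X.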
nvinfer1::Dims ElementWisePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *input_dims, int num_inputs) {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"There is only one output in TRT elementwise "
"op plugin, but got output index: %d.",
index));
PADDLE_ENFORCE_EQ(num_inputs, 2, platform::errors::InvalidArgument(
"There are 2 inputs in TRT elementwise "
"op plugin, but got input number: %d.",
num_inputs));
PADDLE_ENFORCE_NOT_NULL(
input_dims,
platform::errors::InvalidArgument(
"The input dims of TRT elementwise op plugin should not be null."));
return input_dims[0];
}
int ElementWisePlugin::initialize() {
PADDLE_ENFORCE_GT(dims_y_.nbDims, 0,
platform::errors::InvalidArgument(
"The dimension of input Y of TRT elementwise op plugin "
"should be greater than 0, but got %d.",
dims_y_.nbDims));
axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_;
int trimed_nb_dims = dims_y_.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (dims_y_.d[trimed_nb_dims - 1] != 1) {
break;
}
}
dims_y_.nbDims = trimed_nb_dims;
PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_,
platform::errors::InvalidArgument(
"We expect [number of x dims] >= [number of y dims + "
"axis] in TRT elementwise op plugin, but got [number "
"of x dims] = %d, [number of y dims + axis] = %d.",
dims_x_.nbDims, dims_y_.nbDims + axis_));
PADDLE_ENFORCE_LT(
axis_, dims_x_.nbDims,
platform::errors::InvalidArgument("We expect [axis] < [number of x dims] "
"in TRT elementwise op plugin, but got "
"[axis] = %d, [number of x dims] = %d.",
axis_, dims_x_.nbDims));
prev_size_ = 1;
midd_size_ = 1;
post_size_ = 1;
for (int i = 0; i < axis_; ++i) {
prev_size_ *= dims_x_.d[i];
}
for (int i = 0; i < dims_y_.nbDims; ++i) {
PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch. The dims of input Y "
"should be a subsequence of X."));
midd_size_ *= dims_y_.d[i];
}
for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) {
post_size_ *= dims_x_.d[i];
}
return 0;
}
int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace,
#else
void *const *outputs, void *workspace,
#endif
hipStream_t stream) {
const float *x = reinterpret_cast<const float *>(inputs[0]);
const float *y = reinterpret_cast<const float *>(inputs[1]);
float *out = reinterpret_cast<float *>(outputs[0]);
int num = batch_size * prev_size_ * midd_size_ * post_size_;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
details::Add<float>());
} else if (type_ == "mul") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
details::Mul<float>());
} else {
PADDLE_THROW(platform::errors::Fatal(
"The %s type elementwise is not implemented in trt plugin.", type_));
}
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int ElementwisePluginDynamic::initialize() { return 0; }
size_t ElementwisePluginDynamic::getSerializationSize() const {
return SerializedSize(type_.c_str()) + SerializedSize(axis_);
}
void ElementwisePluginDynamic::serialize(void *buffer) const {
SerializeValue(&buffer, type_.c_str());
SerializeValue(&buffer, axis_);
}
nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
return inputs[0];
}
bool ElementwisePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The Elementwise Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int ElementwisePluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, hipStream_t stream) {
auto x_dims = input_desc[0].dims;
auto y_dims = input_desc[1].dims;
int axis = (axis_ == -1) ? x_dims.nbDims - y_dims.nbDims : axis_;
int batch_size = x_dims.d[0];
int prev_size = 1;
int midd_size = 1;
int post_size = 1;
for (int i = 0; i < axis; ++i) {
prev_size *= x_dims.d[i];
}
int trimed_nb_dims = y_dims.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (y_dims.d[trimed_nb_dims - 1] != 1) {
break;
}
}
for (int i = 0; i < trimed_nb_dims; ++i) {
PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch found in trt "
"elementwise plugin's x and y input."));
midd_size *= y_dims.d[i];
}
for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) {
post_size *= x_dims.d[i];
}
const float *x = static_cast<const float *>(inputs[0]);
const float *y = static_cast<const float *>(inputs[1]);
float *out = static_cast<float *>(outputs[0]);
int num = prev_size * midd_size * post_size;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Add<float>());
} else if (type_ == "mul") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Paddle-TRT only support elementwise operation: {add, mul} currently, "
"but got %s.",
type_));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 417c6f1e445a00c88d5d526175a4e9708603b841.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
namespace details {
template <typename T>
struct Add {
__device__ T operator()(const T &a, const T &b) const { return a + b; }
};
template <typename T>
struct Mul {
__device__ T operator()(const T &a, const T &b) const { return a * b; }
};
} // namespace details
template <typename T, typename Operator>
__global__ void elementwise_kernel(const size_t total, const T *x_data,
const T *y_data, T *out_data, int pre, int n,
int post, Operator op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
int idx = tid / post % n;
#if __CUDA_ARCH__ >= 350
out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx));
#else
out_data[tid] = op(x_data[tid], y_data[idx]);
#endif
}
}
nvinfer1::Dims ElementWisePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *input_dims, int num_inputs) {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"There is only one output in TRT elementwise "
"op plugin, but got output index: %d.",
index));
PADDLE_ENFORCE_EQ(num_inputs, 2, platform::errors::InvalidArgument(
"There are 2 inputs in TRT elementwise "
"op plugin, but got input number: %d.",
num_inputs));
PADDLE_ENFORCE_NOT_NULL(
input_dims,
platform::errors::InvalidArgument(
"The input dims of TRT elementwise op plugin should not be null."));
return input_dims[0];
}
int ElementWisePlugin::initialize() {
PADDLE_ENFORCE_GT(dims_y_.nbDims, 0,
platform::errors::InvalidArgument(
"The dimension of input Y of TRT elementwise op plugin "
"should be greater than 0, but got %d.",
dims_y_.nbDims));
axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_;
int trimed_nb_dims = dims_y_.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (dims_y_.d[trimed_nb_dims - 1] != 1) {
break;
}
}
dims_y_.nbDims = trimed_nb_dims;
PADDLE_ENFORCE_GE(dims_x_.nbDims, dims_y_.nbDims + axis_,
platform::errors::InvalidArgument(
"We expect [number of x dims] >= [number of y dims + "
"axis] in TRT elementwise op plugin, but got [number "
"of x dims] = %d, [number of y dims + axis] = %d.",
dims_x_.nbDims, dims_y_.nbDims + axis_));
PADDLE_ENFORCE_LT(
axis_, dims_x_.nbDims,
platform::errors::InvalidArgument("We expect [axis] < [number of x dims] "
"in TRT elementwise op plugin, but got "
"[axis] = %d, [number of x dims] = %d.",
axis_, dims_x_.nbDims));
prev_size_ = 1;
midd_size_ = 1;
post_size_ = 1;
for (int i = 0; i < axis_; ++i) {
prev_size_ *= dims_x_.d[i];
}
for (int i = 0; i < dims_y_.nbDims; ++i) {
PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_], dims_y_.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch. The dims of input Y "
"should be a subsequence of X."));
midd_size_ *= dims_y_.d[i];
}
for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) {
post_size_ *= dims_x_.d[i];
}
return 0;
}
int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs, void *workspace,
#else
void *const *outputs, void *workspace,
#endif
cudaStream_t stream) {
const float *x = reinterpret_cast<const float *>(inputs[0]);
const float *y = reinterpret_cast<const float *>(inputs[1]);
float *out = reinterpret_cast<float *>(outputs[0]);
int num = batch_size * prev_size_ * midd_size_ * post_size_;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
details::Add<float>());
} else if (type_ == "mul") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size_, batch_size * midd_size_, post_size_,
details::Mul<float>());
} else {
PADDLE_THROW(platform::errors::Fatal(
"The %s type elementwise is not implemented in trt plugin.", type_));
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int ElementwisePluginDynamic::initialize() { return 0; }
size_t ElementwisePluginDynamic::getSerializationSize() const {
return SerializedSize(type_.c_str()) + SerializedSize(axis_);
}
void ElementwisePluginDynamic::serialize(void *buffer) const {
SerializeValue(&buffer, type_.c_str());
SerializeValue(&buffer, axis_);
}
nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
return inputs[0];
}
bool ElementwisePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0,
platform::errors::InvalidArgument(
"The Elementwise Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int ElementwisePluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs,
void *const *outputs, void *workspace, cudaStream_t stream) {
auto x_dims = input_desc[0].dims;
auto y_dims = input_desc[1].dims;
int axis = (axis_ == -1) ? x_dims.nbDims - y_dims.nbDims : axis_;
int batch_size = x_dims.d[0];
int prev_size = 1;
int midd_size = 1;
int post_size = 1;
for (int i = 0; i < axis; ++i) {
prev_size *= x_dims.d[i];
}
int trimed_nb_dims = y_dims.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (y_dims.d[trimed_nb_dims - 1] != 1) {
break;
}
}
for (int i = 0; i < trimed_nb_dims; ++i) {
PADDLE_ENFORCE_EQ(x_dims.d[i + axis], y_dims.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch found in trt "
"elementwise plugin's x and y input."));
midd_size *= y_dims.d[i];
}
for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) {
post_size *= x_dims.d[i];
}
const float *x = static_cast<const float *>(inputs[0]);
const float *y = static_cast<const float *>(inputs[1]);
float *out = static_cast<float *>(outputs[0]);
int num = prev_size * midd_size * post_size;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Add<float>());
} else if (type_ == "mul") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Paddle-TRT only support elementwise operation: {add, mul} currently, "
"but got %s.",
type_));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
0242c8357b3257b7551cf81d2b85e1dd94e9c4f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/interpolate.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/variable.hpp>
namespace nbla {
inline float compute_scale(int isize, int osize, bool align_corners) {
return (osize <= 1) ? 0.0f : (align_corners ? float(isize - 1) / (osize - 1)
: float(isize) / osize);
}
__device__ __forceinline__ float get_src_index(float scale, int dst_index,
bool align_corners) {
return align_corners ? scale * dst_index
: fmaxf(0.0f, scale * (float(dst_index) + 0.5f) - 0.5f);
}
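// Worked example (assumed sizes, illustration only): upsampling a width-4 axis
// to width 8 with align_corners = false gives compute_scale(4, 8, false) = 0.5
// and get_src_index(0.5f, 3, false) = max(0, 0.5 * 3.5 - 0.5) = 1.25, so
// output pixel 3 blends input pixels 1 and 2 with weights 0.75 and 0.25.
// With align_corners = true the scale would be (4 - 1) / (8 - 1) = 3/7 instead.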
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_1d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_x1 = make_int2(x1, oc);
const auto nd_idx_x2 = make_int2(x2, oc);
const auto idx_lx0 = device_2d_to_flat(nd_idx_x1, istride);
const auto idx_lx1 = device_2d_to_flat(nd_idx_x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_lx0];
const T val1 = lx1 * src[idx_lx1];
dst[index] = val0 + val1;
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_2d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_y1x1 = make_int3(y1, x1, oc);
const auto nd_idx_y1x2 = make_int3(y1, x2, oc);
const auto nd_idx_y2x1 = make_int3(y2, x1, oc);
const auto nd_idx_y2x2 = make_int3(y2, x2, oc);
const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride);
const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride);
const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride);
const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_ly0x0];
const T val1 = lx1 * src[idx_ly0x1];
const T val2 = lx0 * src[idx_ly1x0];
const T val3 = lx1 * src[idx_ly1x1];
dst[index] = ly0 * (val0 + val1) + ly1 * (val2 + val3);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_3d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto fz = get_src_index(sz, oz, align_corners);
const auto z1 = static_cast<int>(fz);
const auto z2 = min(z1 + 1, id - 1);
const auto lz1 = static_cast<T>(fz - z1);
const auto lz0 = static_cast<T>(1) - lz1;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc);
const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc);
const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc);
const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc);
const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc);
const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc);
const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc);
const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc);
const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride);
const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride);
const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride);
const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride);
const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride);
const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride);
const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride);
const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_lz0y0x0];
const T val1 = lx1 * src[idx_lz0y0x1];
const T val2 = lx0 * src[idx_lz0y1x0];
const T val3 = lx1 * src[idx_lz0y1x1];
const T val4 = lx0 * src[idx_lz1y0x0];
const T val5 = lx1 * src[idx_lz1y0x1];
const T val6 = lx0 * src[idx_lz1y1x0];
const T val7 = lx1 * src[idx_lz1y1x1];
const T val8 = ly0 * (val0 + val1) + ly1 * (val2 + val3);
const T val9 = ly0 * (val4 + val5) + ly1 * (val6 + val7);
dst[index] = lz0 * val8 + lz1 * val9;
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_1d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_x1 = make_int2(x1, oc);
const auto nd_idx_x2 = make_int2(x2, oc);
const auto idx_lx1 = device_2d_to_flat(nd_idx_x1, istride);
const auto idx_lx2 = device_2d_to_flat(nd_idx_x2, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_lx1, lx0 * g);
atomic_add(g_x + idx_lx2, lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_2d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_y1x1 = make_int3(y1, x1, oc);
const auto nd_idx_y1x2 = make_int3(y1, x2, oc);
const auto nd_idx_y2x1 = make_int3(y2, x1, oc);
const auto nd_idx_y2x2 = make_int3(y2, x2, oc);
const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride);
const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride);
const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride);
const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_ly0x0, ly0 * lx0 * g);
atomic_add(g_x + idx_ly0x1, ly0 * lx1 * g);
atomic_add(g_x + idx_ly1x0, ly1 * lx0 * g);
atomic_add(g_x + idx_ly1x1, ly1 * lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_3d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto fz = get_src_index(sz, oz, align_corners);
const auto z1 = static_cast<int>(fz);
const auto z2 = min(z1 + 1, id - 1);
const auto lz1 = static_cast<T>(fz - z1);
const auto lz0 = static_cast<T>(1) - lz1;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc);
const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc);
const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc);
const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc);
const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc);
const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc);
const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc);
const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc);
const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride);
const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride);
const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride);
const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride);
const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride);
const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride);
const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride);
const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_lz0y0x0, lz0 * ly0 * lx0 * g);
atomic_add(g_x + idx_lz0y0x1, lz0 * ly0 * lx1 * g);
atomic_add(g_x + idx_lz0y1x0, lz0 * ly1 * lx0 * g);
atomic_add(g_x + idx_lz0y1x1, lz0 * ly1 * lx1 * g);
atomic_add(g_x + idx_lz1y0x0, lz1 * ly0 * lx0 * g);
atomic_add(g_x + idx_lz1y0x1, lz1 * ly0 * lx1 * g);
atomic_add(g_x + idx_lz1y1x0, lz1 * ly1 * lx0 * g);
atomic_add(g_x + idx_lz1y1x1, lz1 * ly1 * lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_1d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_x = make_int2(ix, oc);
const auto idx_x = device_2d_to_flat(nd_idx_x, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_x];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_2d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_yx = make_int3(iy, ix, oc);
const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_yx];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_3d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto iz = min(static_cast<int>(sz * (oz + 0.5f)), id - 1);
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_zyx = make_int4(iz, iy, ix, oc);
const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_zyx];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_1d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_x = make_int2(ix, oc);
const auto idx_x = device_2d_to_flat(nd_idx_x, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_x, g_y[index]);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_2d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_yx = make_int3(iy, ix, oc);
const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_yx, g_y[index]);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_3d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto iz = min(static_cast<int>(sz * (oz + 0.5f)), id - 1);
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_zyx = make_int4(iz, iy, ix, oc);
const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_zyx, g_y[index]);
}
}
}
template <typename T>
void InterpolateCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
const int ndim = inputs[0]->ndim();
if (this->output_size_.size() == 1) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size = this->channel_last_ ? ic * iw : iw;
const int dst_inner_size = this->channel_last_ ? oc * ow : ow;
const int outer_size = inputs[0]->size() / src_inner_size;
const auto ishape = iw;
const auto istride = this->channel_last_ ? ic : 1;
const auto ostride = this->channel_last_ ? oc : 1;
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_1d<Tcu, true>
: kernel_linear_interpolate_1d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_1d<Tcu, true>
: kernel_nearest_interpolate_1d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx);
}
}
else if (this->output_size_.size() == 2) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih;
const int dst_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh;
const int outer_size = inputs[0]->size() / src_inner_size;
const auto ishape = make_int2(ih, iw);
const auto istride =
this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
const auto ostride =
this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_2d<Tcu, true>
: kernel_linear_interpolate_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_2d<Tcu, true>
: kernel_nearest_interpolate_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy);
}
}
else if (this->output_size_.size() == 3) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 4];
const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 4];
const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size =
this->channel_last_ ? ic * iw * ih * id : iw * ih * id;
const int dst_inner_size =
this->channel_last_ ? oc * ow * oh * od : ow * oh * od;
const int outer_size = inputs[0]->size() / src_inner_size;
const auto ishape = make_int3(id, ih, iw);
const auto istride = this->channel_last_
? make_int3(ih * iw * ic, iw * ic, ic)
: make_int3(ih * iw, iw, 1);
const auto ostride = this->channel_last_
? make_int3(oh * ow * oc, ow * oc, oc)
: make_int3(oh * ow, ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
const float sz = compute_scale(id, od, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_3d<Tcu, true>
: kernel_linear_interpolate_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, sz, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
const float sz = id / static_cast<float>(od);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_3d<Tcu, true>
: kernel_nearest_interpolate_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, sz);
}
}
}
template <typename T>
void InterpolateCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
const int ndim = inputs[0]->ndim();
if (this->output_size_.size() == 1) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size = this->channel_last_ ? ic * iw : iw;
const int g_y_inner_size = this->channel_last_ ? oc * ow : ow;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = iw;
const auto istride = this->channel_last_ ? ic : 1;
const auto ostride = this->channel_last_ ? oc : 1;
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_1d_backward<Tcu, true>
: kernel_linear_interpolate_1d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_1d_backward<Tcu, true>
: kernel_nearest_interpolate_1d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx);
}
}
else if (this->output_size_.size() == 2) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih;
const int g_y_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = make_int2(ih, iw);
const auto istride =
this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
const auto ostride =
this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_2d_backward<Tcu, true>
: kernel_linear_interpolate_2d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_2d_backward<Tcu, true>
: kernel_nearest_interpolate_2d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy);
}
}
else if (this->output_size_.size() == 3) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 4];
const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 4];
const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size =
this->channel_last_ ? ic * iw * ih * id : iw * ih * id;
const int g_y_inner_size =
this->channel_last_ ? oc * ow * oh * od : ow * oh * od;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = make_int3(id, ih, iw);
const auto istride = this->channel_last_
? make_int3(ih * iw * ic, iw * ic, ic)
: make_int3(ih * iw, iw, 1);
const auto ostride = this->channel_last_
? make_int3(oh * ow * oc, ow * oc, oc)
: make_int3(oh * ow, ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
const float sz = compute_scale(id, od, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_3d_backward<Tcu, true>
: kernel_linear_interpolate_3d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, sz, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
const float sz = id / static_cast<float>(od);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_3d_backward<Tcu, true>
: kernel_nearest_interpolate_3d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, sz);
}
}
}
}
| 0242c8357b3257b7551cf81d2b85e1dd94e9c4f0.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/interpolate.hpp>
#include <nbla/cuda/utils/atomic_add.cuh>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/variable.hpp>
namespace nbla {
inline float compute_scale(int isize, int osize, bool align_corners) {
return (osize <= 1) ? 0.0f : (align_corners ? float(isize - 1) / (osize - 1)
: float(isize) / osize);
}
__device__ __forceinline__ float get_src_index(float scale, int dst_index,
bool align_corners) {
return align_corners ? scale * dst_index
: fmaxf(0.0f, scale * (float(dst_index) + 0.5f) - 0.5f);
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_1d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_x1 = make_int2(x1, oc);
const auto nd_idx_x2 = make_int2(x2, oc);
const auto idx_lx0 = device_2d_to_flat(nd_idx_x1, istride);
const auto idx_lx1 = device_2d_to_flat(nd_idx_x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_lx0];
const T val1 = lx1 * src[idx_lx1];
dst[index] = val0 + val1;
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_2d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_y1x1 = make_int3(y1, x1, oc);
const auto nd_idx_y1x2 = make_int3(y1, x2, oc);
const auto nd_idx_y2x1 = make_int3(y2, x1, oc);
const auto nd_idx_y2x2 = make_int3(y2, x2, oc);
const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride);
const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride);
const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride);
const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_ly0x0];
const T val1 = lx1 * src[idx_ly0x1];
const T val2 = lx0 * src[idx_ly1x0];
const T val3 = lx1 * src[idx_ly1x1];
dst[index] = ly0 * (val0 + val1) + ly1 * (val2 + val3);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_3d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto fz = get_src_index(sz, oz, align_corners);
const auto z1 = static_cast<int>(fz);
const auto z2 = min(z1 + 1, id - 1);
const auto lz1 = static_cast<T>(fz - z1);
const auto lz0 = static_cast<T>(1) - lz1;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
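    // Trilinear interpolation: blend the eight surrounding voxels, first along x,
    // then y, then z, using the separable weights lz*, ly*, lx*.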
const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc);
const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc);
const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc);
const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc);
const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc);
const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc);
const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc);
const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc);
const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride);
const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride);
const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride);
const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride);
const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride);
const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride);
const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride);
const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
const T val0 = lx0 * src[idx_lz0y0x0];
const T val1 = lx1 * src[idx_lz0y0x1];
const T val2 = lx0 * src[idx_lz0y1x0];
const T val3 = lx1 * src[idx_lz0y1x1];
const T val4 = lx0 * src[idx_lz1y0x0];
const T val5 = lx1 * src[idx_lz1y0x1];
const T val6 = lx0 * src[idx_lz1y1x0];
const T val7 = lx1 * src[idx_lz1y1x1];
const T val8 = ly0 * (val0 + val1) + ly1 * (val2 + val3);
const T val9 = ly0 * (val4 + val5) + ly1 * (val6 + val7);
dst[index] = lz0 * val8 + lz1 * val9;
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_1d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_x1 = make_int2(x1, oc);
const auto nd_idx_x2 = make_int2(x2, oc);
const auto idx_lx1 = device_2d_to_flat(nd_idx_x1, istride);
const auto idx_lx2 = device_2d_to_flat(nd_idx_x2, istride);
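    // Backward of linear interpolation: each output gradient is scattered back to the
    // two source columns that produced it, weighted by lx0/lx1. atomic_add is required
    // because several output positions can map to the same input column.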
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_lx1, lx0 * g);
atomic_add(g_x + idx_lx2, lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_2d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_y1x1 = make_int3(y1, x1, oc);
const auto nd_idx_y1x2 = make_int3(y1, x2, oc);
const auto nd_idx_y2x1 = make_int3(y2, x1, oc);
const auto nd_idx_y2x2 = make_int3(y2, x2, oc);
const auto idx_ly0x0 = device_3d_to_flat(nd_idx_y1x1, istride);
const auto idx_ly0x1 = device_3d_to_flat(nd_idx_y1x2, istride);
const auto idx_ly1x0 = device_3d_to_flat(nd_idx_y2x1, istride);
const auto idx_ly1x1 = device_3d_to_flat(nd_idx_y2x2, istride);
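    // Scatter each output gradient to its four bilinear source corners with the
    // matching ly*lx weights.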
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_ly0x0, ly0 * lx0 * g);
atomic_add(g_x + idx_ly0x1, ly0 * lx1 * g);
atomic_add(g_x + idx_ly1x0, ly1 * lx0 * g);
atomic_add(g_x + idx_ly1x1, ly1 * lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_linear_interpolate_3d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz, const bool align_corners) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto fz = get_src_index(sz, oz, align_corners);
const auto z1 = static_cast<int>(fz);
const auto z2 = min(z1 + 1, id - 1);
const auto lz1 = static_cast<T>(fz - z1);
const auto lz0 = static_cast<T>(1) - lz1;
const auto fy = get_src_index(sy, oy, align_corners);
const auto y1 = static_cast<int>(fy);
const auto y2 = min(y1 + 1, ih - 1);
const auto ly1 = static_cast<T>(fy - y1);
const auto ly0 = static_cast<T>(1) - ly1;
const auto fx = get_src_index(sx, ox, align_corners);
const auto x1 = static_cast<int>(fx);
const auto x2 = min(x1 + 1, iw - 1);
const auto lx1 = static_cast<T>(fx - x1);
const auto lx0 = static_cast<T>(1) - lx1;
const auto nd_idx_z1y1x1 = make_int4(z1, y1, x1, oc);
const auto nd_idx_z1y1x2 = make_int4(z1, y1, x2, oc);
const auto nd_idx_z1y2x1 = make_int4(z1, y2, x1, oc);
const auto nd_idx_z1y2x2 = make_int4(z1, y2, x2, oc);
const auto nd_idx_z2y1x1 = make_int4(z2, y1, x1, oc);
const auto nd_idx_z2y1x2 = make_int4(z2, y1, x2, oc);
const auto nd_idx_z2y2x1 = make_int4(z2, y2, x1, oc);
const auto nd_idx_z2y2x2 = make_int4(z2, y2, x2, oc);
const auto idx_lz0y0x0 = device_4d_to_flat(nd_idx_z1y1x1, istride);
const auto idx_lz0y0x1 = device_4d_to_flat(nd_idx_z1y1x2, istride);
const auto idx_lz0y1x0 = device_4d_to_flat(nd_idx_z1y2x1, istride);
const auto idx_lz0y1x1 = device_4d_to_flat(nd_idx_z1y2x2, istride);
const auto idx_lz1y0x0 = device_4d_to_flat(nd_idx_z2y1x1, istride);
const auto idx_lz1y0x1 = device_4d_to_flat(nd_idx_z2y1x2, istride);
const auto idx_lz1y1x0 = device_4d_to_flat(nd_idx_z2y2x1, istride);
const auto idx_lz1y1x1 = device_4d_to_flat(nd_idx_z2y2x2, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
const T g = g_y[index];
atomic_add(g_x + idx_lz0y0x0, lz0 * ly0 * lx0 * g);
atomic_add(g_x + idx_lz0y0x1, lz0 * ly0 * lx1 * g);
atomic_add(g_x + idx_lz0y1x0, lz0 * ly1 * lx0 * g);
atomic_add(g_x + idx_lz0y1x1, lz0 * ly1 * lx1 * g);
atomic_add(g_x + idx_lz1y0x0, lz1 * ly0 * lx0 * g);
atomic_add(g_x + idx_lz1y0x1, lz1 * ly0 * lx1 * g);
atomic_add(g_x + idx_lz1y1x0, lz1 * ly1 * lx0 * g);
atomic_add(g_x + idx_lz1y1x1, lz1 * ly1 * lx1 * g);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_1d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
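    // Nearest neighbour: the source column is floor(sx * (ox + 0.5)), clamped to iw - 1.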
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_x = make_int2(ix, oc);
const auto idx_x = device_2d_to_flat(nd_idx_x, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_x];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_2d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_yx = make_int3(iy, ix, oc);
const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_yx];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_3d(
const int dst_inner_size, T *dst, const int src_inner_size, const T *src,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz) {
NBLA_CUDA_KERNEL_LOOP(index, dst_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto iz = min(static_cast<int>(sz * (oz + 0.5f)), id - 1);
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_zyx = make_int4(iz, iy, ix, oc);
const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride);
for (; outer_size--; src += src_inner_size, dst += dst_inner_size) {
dst[index] = src[idx_zyx];
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_1d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int ishape, const int istride, const int ostride,
const float sx) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_2d(index, ostride);
const auto oc = channel_last ? nd_index.y : 0;
const auto ox = nd_index.x;
const auto iw = ishape;
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_x = make_int2(ix, oc);
const auto idx_x = device_2d_to_flat(nd_idx_x, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_x, g_y[index]);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_2d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int2 ishape, const int2 istride, const int2 ostride,
const float sx, const float sy) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_3d(index, ostride);
const auto oc = channel_last ? nd_index.z : 0;
const auto oy = nd_index.x;
const auto ox = nd_index.y;
const auto ih = ishape.x;
const auto iw = ishape.y;
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_yx = make_int3(iy, ix, oc);
const auto idx_yx = device_3d_to_flat(nd_idx_yx, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_yx, g_y[index]);
}
}
}
template <typename T, bool channel_last = false>
__global__ void kernel_nearest_interpolate_3d_backward(
const int g_y_inner_size, const T *g_y, const int g_x_inner_size, T *g_x,
int outer_size, const int3 ishape, const int3 istride, const int3 ostride,
const float sx, const float sy, const float sz) {
NBLA_CUDA_KERNEL_LOOP(index, g_y_inner_size) {
const auto nd_index = device_flat_to_4d(index, ostride);
const auto oc = channel_last ? nd_index.w : 0;
const auto oz = nd_index.x;
const auto oy = nd_index.y;
const auto ox = nd_index.z;
const auto id = ishape.x;
const auto ih = ishape.y;
const auto iw = ishape.z;
const auto iz = min(static_cast<int>(sz * (oz + 0.5f)), id - 1);
const auto iy = min(static_cast<int>(sy * (oy + 0.5f)), ih - 1);
const auto ix = min(static_cast<int>(sx * (ox + 0.5f)), iw - 1);
const auto nd_idx_zyx = make_int4(iz, iy, ix, oc);
const auto idx_zyx = device_4d_to_flat(nd_idx_zyx, istride);
for (; outer_size--; g_x += g_x_inner_size, g_y += g_y_inner_size) {
atomic_add(g_x + idx_zyx, g_y[index]);
}
}
}
template <typename T>
void InterpolateCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_);
auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true);
const int ndim = inputs[0]->ndim();
if (this->output_size_.size() == 1) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size = this->channel_last_ ? ic * iw : iw;
const int dst_inner_size = this->channel_last_ ? oc * ow : ow;
const int outer_size = inputs[0]->size() / src_inner_size;
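    // Only the innermost dimensions (spatial, plus channel when channel_last) take part
    // in the interpolation; all leading dimensions are folded into outer_size and the
    // kernels step through those slices one by one.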
const auto ishape = iw;
const auto istride = this->channel_last_ ? ic : 1;
const auto ostride = this->channel_last_ ? oc : 1;
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_1d<Tcu, true>
: kernel_linear_interpolate_1d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_1d<Tcu, true>
: kernel_nearest_interpolate_1d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx);
}
  } else if (this->output_size_.size() == 2) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih;
const int dst_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh;
const int outer_size = inputs[0]->size() / src_inner_size;
const auto ishape = make_int2(ih, iw);
const auto istride =
this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
const auto ostride =
this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_2d<Tcu, true>
: kernel_linear_interpolate_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_2d<Tcu, true>
: kernel_nearest_interpolate_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy);
}
  } else if (this->output_size_.size() == 3) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 4];
const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 4];
const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int src_inner_size =
this->channel_last_ ? ic * iw * ih * id : iw * ih * id;
const int dst_inner_size =
this->channel_last_ ? oc * ow * oh * od : ow * oh * od;
const int outer_size = inputs[0]->size() / src_inner_size;
const auto ishape = make_int3(id, ih, iw);
const auto istride = this->channel_last_
? make_int3(ih * iw * ic, iw * ic, ic)
: make_int3(ih * iw, iw, 1);
const auto ostride = this->channel_last_
? make_int3(oh * ow * oc, ow * oc, oc)
: make_int3(oh * ow, ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
const float sz = compute_scale(id, od, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_3d<Tcu, true>
: kernel_linear_interpolate_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, dst_inner_size, dst, src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, sz, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
const float sz = id / static_cast<float>(od);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_3d<Tcu, true>
: kernel_nearest_interpolate_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, dst_inner_size, dst,
src_inner_size, src, outer_size, ishape,
istride, ostride, sx, sy, sz);
}
}
}
template <typename T>
void InterpolateCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
cuda_set_device(this->device_);
auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_);
auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false);
const int ndim = inputs[0]->ndim();
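  // Shape and stride handling below mirrors forward_impl; the *_backward kernels
  // scatter output gradients into g_x with atomic adds.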
if (this->output_size_.size() == 1) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size = this->channel_last_ ? ic * iw : iw;
const int g_y_inner_size = this->channel_last_ ? oc * ow : ow;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = iw;
const auto istride = this->channel_last_ ? ic : 1;
const auto ostride = this->channel_last_ ? oc : 1;
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_1d_backward<Tcu, true>
: kernel_linear_interpolate_1d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_1d_backward<Tcu, true>
: kernel_nearest_interpolate_1d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx);
}
  } else if (this->output_size_.size() == 2) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size = this->channel_last_ ? ic * iw * ih : iw * ih;
const int g_y_inner_size = this->channel_last_ ? oc * ow * oh : ow * oh;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = make_int2(ih, iw);
const auto istride =
this->channel_last_ ? make_int2(iw * ic, ic) : make_int2(iw, 1);
const auto ostride =
this->channel_last_ ? make_int2(ow * oc, oc) : make_int2(ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_2d_backward<Tcu, true>
: kernel_linear_interpolate_2d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_2d_backward<Tcu, true>
: kernel_nearest_interpolate_2d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy);
}
  } else if (this->output_size_.size() == 3) {
const int ic = this->channel_last_ ? inputs[0]->shape()[ndim - 1]
: inputs[0]->shape()[ndim - 4];
const int id = this->channel_last_ ? inputs[0]->shape()[ndim - 4]
: inputs[0]->shape()[ndim - 3];
const int ih = this->channel_last_ ? inputs[0]->shape()[ndim - 3]
: inputs[0]->shape()[ndim - 2];
const int iw = this->channel_last_ ? inputs[0]->shape()[ndim - 2]
: inputs[0]->shape()[ndim - 1];
const int oc = this->channel_last_ ? outputs[0]->shape()[ndim - 1]
: outputs[0]->shape()[ndim - 4];
const int od = this->channel_last_ ? outputs[0]->shape()[ndim - 4]
: outputs[0]->shape()[ndim - 3];
const int oh = this->channel_last_ ? outputs[0]->shape()[ndim - 3]
: outputs[0]->shape()[ndim - 2];
const int ow = this->channel_last_ ? outputs[0]->shape()[ndim - 2]
: outputs[0]->shape()[ndim - 1];
const int g_x_inner_size =
this->channel_last_ ? ic * iw * ih * id : iw * ih * id;
const int g_y_inner_size =
this->channel_last_ ? oc * ow * oh * od : ow * oh * od;
const int outer_size = inputs[0]->size() / g_x_inner_size;
const auto ishape = make_int3(id, ih, iw);
const auto istride = this->channel_last_
? make_int3(ih * iw * ic, iw * ic, ic)
: make_int3(ih * iw, iw, 1);
const auto ostride = this->channel_last_
? make_int3(oh * ow * oc, ow * oc, oc)
: make_int3(oh * ow, ow, 1);
if (this->mode_ == "linear") {
const float sx = compute_scale(iw, ow, this->align_corners_);
const float sy = compute_scale(ih, oh, this->align_corners_);
const float sz = compute_scale(id, od, this->align_corners_);
auto kernel = this->channel_last_
? kernel_linear_interpolate_3d_backward<Tcu, true>
: kernel_linear_interpolate_3d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(
kernel, g_y_inner_size, g_y, g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, sz, this->align_corners_);
} else if (this->mode_ == "nearest") {
const float sx = iw / static_cast<float>(ow);
const float sy = ih / static_cast<float>(oh);
const float sz = id / static_cast<float>(od);
auto kernel = this->channel_last_
? kernel_nearest_interpolate_3d_backward<Tcu, true>
: kernel_nearest_interpolate_3d_backward<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel, g_y_inner_size, g_y,
g_x_inner_size, g_x, outer_size, ishape,
istride, ostride, sx, sy, sz);
}
}
}
}
|
69ec65fbf8b49a8eb15f3f6b8d6dee14c60a87df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hipcub/hipcub.hpp>
#include <hipblas.h>
#include <hip/hip_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
static size_t AlignTo(size_t a, size_t b) {
return CeilDiv(a, b) * b;
}
size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length) {
const size_t len = batch_size * num_heads * sequence_length * sequence_length;
const size_t bytes = len * element_size;
const size_t alignment = 256;
const size_t bytesAligned = AlignTo(bytes, alignment);
return bytesAligned;
}
size_t GetAttentionWorkspaceSize(size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length) {
size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
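  // Total workspace = transposed Q/K/V (3 x B x S x N x H elements) plus two aligned
  // B x N x S x S scratch buffers for the attention scores and softmax probabilities.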
return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length);
}
template <typename T, unsigned TPB>
__device__ inline void Softmax(const int ld, const int last_valid, const T* input, T* output) {
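  // One thread block normalizes one row of `ld` attention scores: threads cooperatively
  // accumulate exp() over the first `last_valid` (unmasked) entries, reduce the sum, and
  // then write the normalized probabilities, with masked positions set to zero.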
using BlockReduce = hipcub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float reverse_z;
float thread_data(0);
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld;
for (int i = threadIdx.x; i < last_valid; i += TPB) {
const int index = offset + i;
const float val = input[index];
thread_data += expf(val);
}
hipcub::Sum sum;
const auto z = BlockReduce(tmp_storage).Reduce(thread_data, sum);
if (threadIdx.x == 0) {
reverse_z = 1.f / z;
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int index = offset + i;
const float val = (i < last_valid) ? expf(float(input[index])) * reverse_z : 0.f;
output[index] = T(val);
}
}
template <typename T, unsigned TPB>
__device__ inline void SoftmaxSmall(const int ld, const int last_valid, const T* input, T* output) {
using BlockReduce = hipcub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float reverse_z;
float thread_data(0);
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld;
const int index = offset + threadIdx.x;
if (threadIdx.x < last_valid) {
const float val = input[index];
thread_data = expf(val);
}
hipcub::Sum sum;
const auto z = BlockReduce(tmp_storage).Reduce(thread_data, sum);
if (threadIdx.x == 0) {
reverse_z = (1.f) / z;
}
__syncthreads();
if (threadIdx.x < ld) {
// this will be 0 for threadIdx.x >= last_valid
output[index] = T(thread_data * reverse_z);
}
}
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernelSmall(const int sequence_length, const int* mask_index, const T* input, T* output) {
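  // blockIdx.y indexes the batch; mask_index gives the number of valid tokens per batch
  // entry, so attention weights beyond that position are zeroed by the softmax.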
__shared__ int last_valid;
if (threadIdx.x == 0) {
last_valid = min(sequence_length, mask_index[blockIdx.y]);
}
__syncthreads();
SoftmaxSmall<T, TPB>(sequence_length, last_valid, input, output);
}
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernel(const int sequence_length, const int* mask_index, const T* input, T* output) {
__shared__ int last_valid;
if (threadIdx.x == 0) {
last_valid = min(sequence_length, mask_index[blockIdx.y]);
}
__syncthreads();
Softmax<T, TPB>(sequence_length, last_valid, input, output);
}
template <typename T>
bool ComputeMaskedSoftmax(hipStream_t stream, const int sequence_length, const int batch_size, const int num_heads,
const int* mask_index, const T* input, T* output) {
// Mask is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(sequence_length * num_heads, batch_size, 1);
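  // Sequence lengths that fit in one block (<= 32, <= 128, or exactly 384) use the
  // "small" kernel where each thread owns a single score; other lengths fall back to
  // the strided kernel with 256 threads per block.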
if (sequence_length <= 32) {
const int blockSize = 32;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, sequence_length, mask_index, input, output);
} else if (sequence_length <= 128) {
const int blockSize = 128;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, sequence_length, mask_index, input, output);
} else if (sequence_length == 384) {
const int blockSize = 384;
hipLaunchKernelGGL(( MaskedSoftmaxKernelSmall<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, sequence_length, mask_index, input, output);
} else {
const int blockSize = 256;
hipLaunchKernelGGL(( MaskedSoftmaxKernel<T, blockSize>)
, dim3(grid), dim3(blockSize), 0, stream, sequence_length, mask_index, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
template <typename T>
__global__ void TransposeCtx(const int H, const T* input, T* output) {
// Input: BxNxSxH
// Output: BxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int num_heads = blockDim.y;
int sequence_length = gridDim.x;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = s * H + n * sequence_length * H + b * NHS;
const int out_offset = n * H + s * NH + b * NHS;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
bool LaunchTransCtx(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchTransCtx(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 1);
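  // Vectorize the copy when head_size allows it: four halves at a time as float2,
  // two at a time as half2, otherwise one element per thread.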
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
hipLaunchKernelGGL(( TransposeCtx<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( TransposeCtx<half2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeCtx<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
template <typename T>
__global__ void TransposeQKV(const int H, const T* input, T* output) {
// Input: BxSx3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
bool LaunchTransQkv(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<float>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
bool LaunchTransQkv(hipStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
hipLaunchKernelGGL(( TransposeQKV<float2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
hipLaunchKernelGGL(( TransposeQKV<half2>), dim3(grid), dim3(block), 0, stream, H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel..
const dim3 block(head_size, num_heads, 1);
hipLaunchKernelGGL(( TransposeQKV<half>), dim3(grid), dim3(block), 0, stream, head_size, input, output);
}
return CUDA_CALL(hipPeekAtLastError());
}
hipblasStatus_t inline CublasGemmStridedBatched(
hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const float alpha,
const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB,
const float beta, float* C, int ldc, long long int strideC, int batchCount) {
return hipblasSgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
hipblasStatus_t inline CublasGemmStridedBatched(
hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k, const half alpha,
const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB,
const half beta, half* C, int ldc, long long int strideC, int batchCount) {
return hipblasHgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
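// RAII guard: saves the BLAS handle's pointer mode and math mode, switches to host-side
// scalar arguments and tensor-op math for the batched GEMMs below, and restores both
// settings on destruction.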
struct CublasConfigHelper {
hipblasPointerMode_t pointer_mode_;
cublasMath_t math_mode_;
hipblasHandle_t cublas_;
CublasConfigHelper(hipblasHandle_t cublas)
: cublas_(cublas) {
hipblasGetPointerMode(cublas_, &pointer_mode_);
cublasGetMathMode(cublas_, &math_mode_);
hipblasSetPointerMode(cublas_, HIPBLAS_POINTER_MODE_HOST);
cublasSetMathMode(cublas_, CUBLAS_TENSOR_OP_MATH);
}
~CublasConfigHelper() {
cublasSetMathMode(cublas_, math_mode_);
hipblasSetPointerMode(cublas_, pointer_mode_);
}
};
template <typename T>
bool QkvToContext(
hipblasHandle_t& cublas, hipStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index) {
const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length);
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
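  // Workspace layout: scratch1 holds the raw attention scores, scratch2 the softmax
  // probabilities, and scratch3 the Q/K/V tensors transposed to BxNxSxH (later reused
  // for the attention context before the final transpose).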
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const int temp_matrix_size = sequence_length * sequence_length;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
hipblasSetStream(cublas, stream);
CublasConfigHelper helper(cublas);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, sequence_length, sequence_length, head_size, rsqrt_head_size, k, head_size, size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS
if (!ComputeMaskedSoftmax<T>(stream, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2)) {
return false;
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, head_size, sequence_length, sequence_length, 1.f, v, head_size, size_per_batch,
scratch2, sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output);
}
bool LaunchAttentionKernel(
const void* input,
const int* mask_index,
void* output,
const int batch_size,
const int sequence_length,
const int num_heads,
const int head_size,
void* workspace,
hipblasHandle_t& cublas,
const size_t element_size) {
// use default stream
const hipStream_t stream = nullptr;
if (element_size == 2) {
return QkvToContext(cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace),
mask_index);
} else {
return QkvToContext(cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace),
mask_index);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 69ec65fbf8b49a8eb15f3f6b8d6dee14c60a87df.cu | /*
The implementation of this file is based on qkvToContext plugin in TensorRT demo:
https://github.com/NVIDIA/TensorRT/tree/release/5.1/demo/BERT/
Copyright 2019 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Modifications: scaling is moved from masked softmax to the gemm before that.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cub/cub.cuh>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "attention_impl.h"
using namespace onnxruntime::cuda;
using namespace cub;
namespace onnxruntime {
namespace contrib {
namespace cuda {
static size_t AlignTo(size_t a, size_t b) {
return CeilDiv(a, b) * b;
}
size_t ScratchSize(size_t element_size, int batch_size, int num_heads, int sequence_length) {
const size_t len = batch_size * num_heads * sequence_length * sequence_length;
const size_t bytes = len * element_size;
const size_t alignment = 256;
const size_t bytesAligned = AlignTo(bytes, alignment);
return bytesAligned;
}
size_t GetAttentionWorkspaceSize(size_t element_size, int batch_size, int num_heads, int head_size, int sequence_length) {
size_t qkv_size = 3 * batch_size * sequence_length * num_heads * head_size * element_size;
return qkv_size + 2 * ScratchSize(element_size, batch_size, num_heads, sequence_length);
}
template <typename T, unsigned TPB>
__device__ inline void Softmax(const int ld, const int last_valid, const T* input, T* output) {
using BlockReduce = cub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float reverse_z;
float thread_data(0);
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld;
for (int i = threadIdx.x; i < last_valid; i += TPB) {
const int index = offset + i;
const float val = input[index];
thread_data += expf(val);
}
cub::Sum sum;
const auto z = BlockReduce(tmp_storage).Reduce(thread_data, sum);
if (threadIdx.x == 0) {
reverse_z = 1.f / z;
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int index = offset + i;
const float val = (i < last_valid) ? expf(float(input[index])) * reverse_z : 0.f;
output[index] = T(val);
}
}
template <typename T, unsigned TPB>
__device__ inline void SoftmaxSmall(const int ld, const int last_valid, const T* input, T* output) {
using BlockReduce = cub::BlockReduce<float, TPB>;
__shared__ typename BlockReduce::TempStorage tmp_storage;
__shared__ float reverse_z;
float thread_data(0);
const int offset = (blockIdx.y * gridDim.x + blockIdx.x) * ld;
const int index = offset + threadIdx.x;
if (threadIdx.x < last_valid) {
const float val = input[index];
thread_data = expf(val);
}
cub::Sum sum;
const auto z = BlockReduce(tmp_storage).Reduce(thread_data, sum);
if (threadIdx.x == 0) {
reverse_z = (1.f) / z;
}
__syncthreads();
if (threadIdx.x < ld) {
// this will be 0 for threadIdx.x >= last_valid
output[index] = T(thread_data * reverse_z);
}
}
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernelSmall(const int sequence_length, const int* mask_index, const T* input, T* output) {
__shared__ int last_valid;
if (threadIdx.x == 0) {
last_valid = min(sequence_length, mask_index[blockIdx.y]);
}
__syncthreads();
SoftmaxSmall<T, TPB>(sequence_length, last_valid, input, output);
}
template <typename T, unsigned TPB>
__global__ void MaskedSoftmaxKernel(const int sequence_length, const int* mask_index, const T* input, T* output) {
__shared__ int last_valid;
if (threadIdx.x == 0) {
last_valid = min(sequence_length, mask_index[blockIdx.y]);
}
__syncthreads();
Softmax<T, TPB>(sequence_length, last_valid, input, output);
}
template <typename T>
bool ComputeMaskedSoftmax(cudaStream_t stream, const int sequence_length, const int batch_size, const int num_heads,
const int* mask_index, const T* input, T* output) {
// Mask is of length batch_size and assumes the valid region is contiguous starting
// from the beginning of the sequence
const dim3 grid(sequence_length * num_heads, batch_size, 1);
if (sequence_length <= 32) {
const int blockSize = 32;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(sequence_length, mask_index, input, output);
} else if (sequence_length <= 128) {
const int blockSize = 128;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(sequence_length, mask_index, input, output);
} else if (sequence_length == 384) {
const int blockSize = 384;
MaskedSoftmaxKernelSmall<T, blockSize>
<<<grid, blockSize, 0, stream>>>(sequence_length, mask_index, input, output);
} else {
const int blockSize = 256;
MaskedSoftmaxKernel<T, blockSize>
<<<grid, blockSize, 0, stream>>>(sequence_length, mask_index, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
template <typename T>
__global__ void TransposeCtx(const int H, const T* input, T* output) {
// Input: BxNxSxH
// Output: BxSxNxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int num_heads = blockDim.y;
int sequence_length = gridDim.x;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = s * H + n * sequence_length * H + b * NHS;
const int out_offset = n * H + s * NH + b * NHS;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
bool LaunchTransCtx(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
TransposeCtx<float><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
bool LaunchTransCtx(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 1);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
TransposeCtx<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
TransposeCtx<half2><<<grid, block, 0, stream>>>(H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel.
const dim3 block(head_size, num_heads, 1);
TransposeCtx<half><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
template <typename T>
__global__ void TransposeQKV(const int H, const T* input, T* output) {
// Input: BxSx3xNxH
// Output: 3xBxNxSxH
int n = threadIdx.y;
int s = blockIdx.x;
int b = blockIdx.y;
int m = blockIdx.z; // matrix id
const int num_heads = blockDim.y;
const int sequence_length = gridDim.x;
const int batch_size = gridDim.y;
const int NH = num_heads * H;
const int NHS = NH * sequence_length;
const int in_offset = n * H + m * NH + s * 3 * NH + b * NHS * 3;
const int out_offset = s * H + n * sequence_length * H + b * NHS + m * NHS * batch_size;
const int i = threadIdx.x;
if (i < H) {
output[out_offset + i] = input[in_offset + i];
}
}
bool LaunchTransQkv(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const float* input, float* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size & 1)) {
const int H = head_size / 2;
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
const dim3 block(H, num_heads, 1);
TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else {
const dim3 block(head_size, num_heads, 1);
TransposeQKV<float><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
bool LaunchTransQkv(cudaStream_t stream,
const int sequence_length, const int batch_size, const int head_size, const int num_heads,
const half* input, half* output) {
const dim3 grid(sequence_length, batch_size, 3);
if (0 == (head_size % 4)) {
const int H = head_size / 4;
const dim3 block(H, num_heads, 1);
const float2* input2 = reinterpret_cast<const float2*>(input);
float2* output2 = reinterpret_cast<float2*>(output);
TransposeQKV<float2><<<grid, block, 0, stream>>>(H, input2, output2);
} else if (0 == (head_size & 1)) {
const int H = head_size / 2;
const dim3 block(H, num_heads, 1);
const half2* input2 = reinterpret_cast<const half2*>(input);
half2* output2 = reinterpret_cast<half2*>(output);
TransposeQKV<half2><<<grid, block, 0, stream>>>(H, input2, output2);
} else { // this should be an "odd" case. probably not worth catching it in the half2 kernel..
const dim3 block(head_size, num_heads, 1);
TransposeQKV<half><<<grid, block, 0, stream>>>(head_size, input, output);
}
return CUDA_CALL(cudaPeekAtLastError());
}
cublasStatus_t inline CublasGemmStridedBatched(
cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const float alpha,
const float* A, int lda, long long int strideA, const float* B, int ldb, long long int strideB,
const float beta, float* C, int ldc, long long int strideC, int batchCount) {
return cublasSgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
cublasStatus_t inline CublasGemmStridedBatched(
cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k, const half alpha,
const half* A, int lda, long long int strideA, const half* B, int ldb, long long int strideB,
const half beta, half* C, int ldc, long long int strideC, int batchCount) {
return cublasHgemmStridedBatched(
handle, transa, transb, m, n, k, &alpha, A, lda, strideA, B, ldb, strideB, &beta, C, ldc, strideC, batchCount);
}
struct CublasConfigHelper {
cublasPointerMode_t pointer_mode_;
cublasMath_t math_mode_;
cublasHandle_t cublas_;
CublasConfigHelper(cublasHandle_t cublas)
: cublas_(cublas) {
cublasGetPointerMode(cublas_, &pointer_mode_);
cublasGetMathMode(cublas_, &math_mode_);
cublasSetPointerMode(cublas_, CUBLAS_POINTER_MODE_HOST);
cublasSetMathMode(cublas_, CUBLAS_TENSOR_OP_MATH);
}
~CublasConfigHelper() {
cublasSetMathMode(cublas_, math_mode_);
cublasSetPointerMode(cublas_, pointer_mode_);
}
};
template <typename T>
bool QkvToContext(
cublasHandle_t& cublas, cudaStream_t stream,
const int batch_size, const int sequence_length, const int num_heads, const int head_size, const size_t element_size,
const T* input, T* output, T* workspace,
const int* mask_index) {
const size_t bytes = ScratchSize(element_size, batch_size, num_heads, sequence_length);
T* scratch1 = workspace;
T* scratch2 = scratch1 + (bytes / element_size);
T* scratch3 = scratch2 + (bytes / element_size);
// input should be BxSx3xNxH => scratch3: 3xBxNxSxH
if (!LaunchTransQkv(stream, sequence_length, batch_size, head_size, num_heads, input, scratch3)) {
return false;
}
// now scratch3 has Q, K, V: each has size BxNxSxH
const int batches = batch_size * num_heads;
const int size_per_batch = sequence_length * head_size;
const int total_size = batches * size_per_batch;
const int temp_matrix_size = sequence_length * sequence_length;
const T* q = scratch3;
const T* k = q + total_size;
const T* v = k + total_size;
cublasSetStream(cublas, stream);
CublasConfigHelper helper(cublas);
// compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scratch1: BxNxSxS
const float rsqrt_head_size = 1.f / sqrt(static_cast<float>(head_size));
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_T, CUBLAS_OP_N, sequence_length, sequence_length, head_size, rsqrt_head_size, k, head_size, size_per_batch,
q, head_size, size_per_batch, 0.f, scratch1, sequence_length, temp_matrix_size, batches))) {
return false;
}
// apply softmax and store result P to scratch2: BxNxSxS
if (!ComputeMaskedSoftmax<T>(stream, sequence_length, batch_size, num_heads, mask_index, scratch1, scratch2)) {
return false;
}
// compute P*V (as V*P), and store in scratch3: BxNxSxH
if (!CUBLAS_CALL(CublasGemmStridedBatched(
cublas, CUBLAS_OP_N, CUBLAS_OP_N, head_size, sequence_length, sequence_length, 1.f, v, head_size, size_per_batch,
scratch2, sequence_length, temp_matrix_size, 0.f, scratch3, head_size, size_per_batch, batches))) {
return false;
}
// scratch3 is BxNxSxH, transpose to output BxSxNxH
return LaunchTransCtx(stream, sequence_length, batch_size, head_size, num_heads, scratch3, output);
}
bool LaunchAttentionKernel(
const void* input,
const int* mask_index,
void* output,
const int batch_size,
const int sequence_length,
const int num_heads,
const int head_size,
void* workspace,
cublasHandle_t& cublas,
const size_t element_size) {
// use default stream
const cudaStream_t stream = nullptr;
if (element_size == 2) {
return QkvToContext(cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
reinterpret_cast<const half*>(input), reinterpret_cast<half*>(output), reinterpret_cast<half*>(workspace),
mask_index);
} else {
return QkvToContext(cublas, stream,
batch_size, sequence_length, num_heads, head_size, element_size,
reinterpret_cast<const float*>(input), reinterpret_cast<float*>(output), reinterpret_cast<float*>(workspace),
mask_index);
}
}
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
a22ab91d827daed961238a14cbac0e628b8a399c.hip | // !!! This is a file automatically generated by hipify!!!
#include <Timer.hpp>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
using LOFAR::NSTimer;
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
/* Utility function/macro, used to do error checking.
Use this function/macro like this:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(hipGetLastError());
*/
#define checkCudaCall(result) { \
if (result != hipSuccess){ \
cerr << "cuda error: " << hipGetErrorString(result); \
cerr << " in " << __FILE__ << " at line "<< __LINE__<<endl; \
exit(1); \
} \
}
__constant__ float filter_sum = 35.0f;
__constant__ float filterCuda[] = { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
__global__ void rgb2grayCudaKernel(unsigned char *deviceImage, unsigned char *deviceResult, const int height, const int width){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int i = globalThreadNum;
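    /* The image is stored planar (all R, then all G, then all B); 30/59/11 are the
       integer-percent equivalents of the 0.30/0.59/0.11 luma coefficients. */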
    // Guard against the rounded-up grid: the last block may contain threads past the image end.
    if (i < width * height) {
        int grayPix = (30 * deviceImage[i] + 59 * deviceImage[(width * height) + i] + 11 * deviceImage[(2 * width * height) + i]) / 100;
        deviceResult[i] = grayPix;
    }
}
void rgb2grayCuda(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height) {
unsigned char *deviceImage;
unsigned char *deviceResult;
unsigned int imageSize = width * height;
int initialBytes = width * height * 3 *sizeof(unsigned char);
int endBytes = width * height * sizeof(unsigned char);
unsigned int xGridDim = 0, yGridDim=1;
hipError_t err = hipMalloc((void**) &deviceImage, initialBytes);
err = hipMalloc((void**) &deviceResult, endBytes);
err = hipMemset(deviceResult, 0, endBytes);
err = hipMemset(deviceImage, 0, initialBytes);
err = hipMemcpy(deviceImage, inputImage, initialBytes, hipMemcpyHostToDevice);
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Convert the input image to grayscale
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
hipLaunchKernelGGL(( rgb2grayCudaKernel), dim3(grid), dim3(block), 0, 0, deviceImage, deviceResult, height, width);
err = hipDeviceSynchronize();
err = hipMemcpy(grayImage, deviceResult, endBytes, hipMemcpyDeviceToHost);
hipFree(deviceImage);
hipFree(deviceResult);
}
void rgb2gray(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
float grayPix = 0.0f;
float r = static_cast< float >(inputImage[(y * width) + x]);
float g = static_cast< float >(inputImage[(width * height) + (y * width) + x]);
float b = static_cast< float >(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
grayImage[(y * width) + x] = static_cast< unsigned char >(grayPix);
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "rgb2gray (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void histogram1DCudaKernel(unsigned char *grayImg, unsigned int *hist, const int no_of_bins, const int width, const int height){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int startIdxHist = globalThreadNum*no_of_bins;
int startIdxImg = globalThreadNum*width;
for(int i = startIdxImg; i < startIdxImg + width && i<width*height; i++) {
hist[startIdxHist+grayImg[i]]++;
}
}
__global__ void sumHistCuda(unsigned int *histArray, unsigned int *hist, const int no_of_bins, const int height, const int width){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
for(int i = 0; i < height; i++) {
hist[globalThreadNum] += histArray[i*no_of_bins+globalThreadNum];
}
}
void histogram1DCuda(unsigned char *grayImage, unsigned char *histogramImage,const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int BAR_WIDTH)
{
unsigned int max = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
unsigned int *histArray;
unsigned int hist[256] = {0};
histArray = (unsigned int*)malloc(height*HISTOGRAM_SIZE*sizeof(unsigned int));
memset(histArray, 0, height*HISTOGRAM_SIZE*sizeof(unsigned int));
unsigned char *grayImgCuda;
unsigned int *histArrayComputedCuda;
unsigned int *histCuda;
unsigned int xGridDim = 0;
hipMalloc((void **) &histArrayComputedCuda, height*HISTOGRAM_SIZE*sizeof(unsigned int));
hipMemset(histArrayComputedCuda, 0, height*HISTOGRAM_SIZE*sizeof(unsigned int));
hipMalloc((void **) &grayImgCuda, width*height*sizeof(unsigned char));
if(height%1024==0)
xGridDim = height / 1024;
else
xGridDim = height / 1024 + 1;
dim3 gridSize(xGridDim,1,1);
dim3 blockSize(32,32,1);
hipMemcpy(grayImgCuda,grayImage,sizeof(unsigned char)*height*width,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( histogram1DCudaKernel), dim3(gridSize), dim3(blockSize), 0, 0, grayImgCuda, histArrayComputedCuda, HISTOGRAM_SIZE, width, height);
hipError_t err = hipDeviceSynchronize();
err = hipMemcpy(histArray, histArrayComputedCuda, height*HISTOGRAM_SIZE*sizeof(unsigned int), hipMemcpyDeviceToHost);
err = hipMalloc((void **)&histCuda,HISTOGRAM_SIZE*sizeof(unsigned int));
err = hipMemset(histCuda, 0, HISTOGRAM_SIZE*sizeof(unsigned int));
dim3 gridSize2(1,1,1);
dim3 blockSize2(16,16,1);
//err = hipMemcpy(histArrayComputedCuda, histArray, height*HISTOGRAM_SIZE*sizeof(unsigned int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumHistCuda), dim3(gridSize2), dim3(blockSize2), 0, 0, histArrayComputedCuda, histCuda, 256, height, width); /* one 16x16 block: one thread per bin */
err = hipDeviceSynchronize();
err = hipMemcpy(histogram, histCuda, HISTOGRAM_SIZE*sizeof(unsigned int), hipMemcpyDeviceToHost);
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > max )
{
max = histogram[i];
}
}
for ( unsigned int x = 0; x < HISTOGRAM_SIZE * BAR_WIDTH; x += BAR_WIDTH )
{
unsigned int value = HISTOGRAM_SIZE - ((histogram[x / BAR_WIDTH] * HISTOGRAM_SIZE) / max);
for ( unsigned int y = 0; y < value; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 0;
}
}
for ( unsigned int y = value; y < HISTOGRAM_SIZE; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 255;
}
}
}
//cout << fixed << setprecision(6);
//cout << "histogram1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
hipFree(grayImgCuda);
hipFree(histArrayComputedCuda);
hipFree(histCuda);
free(histArray); /* host staging copy of the per-row histograms */
}
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage,const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int BAR_WIDTH)
{
unsigned int max = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(unsigned int));
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
histogram[static_cast< unsigned int >(grayImage[(y * width) + x])] += 1;
}
}
// /Kernel
kernelTime.stop();
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > max )
{
max = histogram[i];
}
}
for ( unsigned int x = 0; x < HISTOGRAM_SIZE * BAR_WIDTH; x += BAR_WIDTH )
{
unsigned int value = HISTOGRAM_SIZE - ((histogram[x / BAR_WIDTH] * HISTOGRAM_SIZE) / max);
for ( unsigned int y = 0; y < value; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 0;
}
}
for ( unsigned int y = value; y < HISTOGRAM_SIZE; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 255;
}
}
}
//cout << fixed << setprecision(6);
//cout << "histogram1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void contrast1DCudaKernel(unsigned char *deviceImage, unsigned char *deviceResult, const int height, const int width,
unsigned int min, unsigned int max, float diff)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int i = globalThreadNum;
if (i >= width * height) return; /* guard the surplus threads of the last block */
unsigned int grayPix = static_cast< unsigned int >(deviceImage[i]);
if ( grayPix < min )
{
grayPix = 0;
}
else if ( grayPix > max )
{
grayPix = 255;
}
else
{
grayPix = (255 * (grayPix - min) / diff);
}
deviceResult[i] = static_cast< unsigned char > (grayPix);
}
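/* The branch above maps [min, max] linearly onto [0, 255]. For example, with
   min = 50 and max = 200 (diff = 150), a grey level of 125 becomes
   255 * (125 - 50) / 150 = 127 after truncation to an integer. */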
void contrast1DCuda(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD)
{
unsigned char *deviceImage;
unsigned char *deviceResult;
unsigned int imageSize = width * height;
int numBytes = width * height * sizeof(unsigned char);
unsigned int i = 0, xGridDim = 0, yGridDim = 1;
unsigned int maxHist = 0;
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > maxHist )
{
maxHist = histogram[i];
}
}
i=0;
while ( (i < HISTOGRAM_SIZE) && ((histogram[i]*HISTOGRAM_SIZE)/maxHist < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
hipMalloc((void**) &deviceImage, numBytes);
hipMalloc((void**) &deviceResult, numBytes);
hipMemset(deviceResult, 0, numBytes);
hipMemset(deviceImage, 0, numBytes);
hipError_t err = hipMemcpy(deviceImage, grayImage, numBytes, hipMemcpyHostToDevice);
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Set up the launch configuration
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
// Stretch the contrast of the grayscale image on the GPU
hipLaunchKernelGGL(( contrast1DCudaKernel), dim3(grid), dim3(block), 0, 0, deviceImage, deviceResult, height, width, min, max, diff);
hipDeviceSynchronize();
hipMemcpy(grayImage, deviceResult, numBytes, hipMemcpyDeviceToHost);
hipFree(deviceImage);
hipFree(deviceResult);
}
void contrast1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD)
{
unsigned int i = 0;
unsigned int maxHist = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
for ( unsigned int j = 0; j < HISTOGRAM_SIZE; j++ )
{
if ( histogram[j] > maxHist )
{
maxHist = histogram[j];
}
}
while ( (i < HISTOGRAM_SIZE) && ((histogram[i]*HISTOGRAM_SIZE/maxHist) < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for (int x = 0; x < width; x++ )
{
unsigned char pixel = grayImage[(y * width) + x];
if ( pixel < min )
{
pixel = 0;
}
else if ( pixel > max )
{
pixel = 255;
}
else
{
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
grayImage[(y * width) + x] = pixel;
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "contrast1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void triangularSmoothKernel(unsigned char *grayScale, unsigned char *smoothened, unsigned int width, unsigned int height)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int pixelPos = globalThreadNum;
int modWidth = pixelPos%width;
int modHeight = (pixelPos/width);
int x, y;
float smoothened_0 = 0, smoothened_1 = 0, smoothened_2 = 0, smoothened_3 = 0, smoothened_4 = 0, smoothened_f = 0;
bool unrollFlag = true;
int width2pos = 2*width, width2minus = -width2pos;
int x_start = 0, x_end = 5, y_start = 0, y_end = 5;
if(pixelPos >= width * height)
return;
if((modWidth < 2) || (modWidth > width - 3)) {
unrollFlag = false;
if(modWidth < 2)
x_start = 2 - modWidth;
else
x_end = 2 + width - modWidth;
}
if((modHeight < 2) || (modHeight > height - 3)) {
unrollFlag = false;
if(modHeight < 2)
y_start = 2 - modHeight;
else
y_end = 2 + height - modHeight;
}
if(!unrollFlag){
float el_sum = 0;
for(y = y_start; y < y_end; y++){
for(x = x_start; x < x_end; x++) {
smoothened_f += filterCuda[5*y+x] * grayScale[pixelPos+x-2+(y-2)*width];
el_sum += filterCuda[5*y+x];
}
}
smoothened_f/=el_sum;
smoothened[pixelPos] = smoothened_f;
}
else {
smoothened_0 += filterCuda[0] * grayScale[pixelPos-2+width2minus];
smoothened_1 += filterCuda[1] * grayScale[pixelPos-1+width2minus];
smoothened_2 += filterCuda[2] * grayScale[pixelPos+0+width2minus];
smoothened_3 += filterCuda[3] * grayScale[pixelPos+1+width2minus];
smoothened_4 += filterCuda[4] * grayScale[pixelPos+2+width2minus];
smoothened_0 += filterCuda[5] * grayScale[pixelPos-2-width];
smoothened_1 += filterCuda[6] * grayScale[pixelPos-1-width];
smoothened_2 += filterCuda[7] * grayScale[pixelPos+0-width];
smoothened_3 += filterCuda[8] * grayScale[pixelPos+1-width];
smoothened_4 += filterCuda[9] * grayScale[pixelPos+2-width];
smoothened_0 += filterCuda[10] * grayScale[pixelPos-2];
smoothened_1 += filterCuda[11] * grayScale[pixelPos-1];
smoothened_2 += filterCuda[12] * grayScale[pixelPos+0];
smoothened_3 += filterCuda[13] * grayScale[pixelPos+1];
smoothened_4 += filterCuda[14] * grayScale[pixelPos+2];
smoothened_0 += filterCuda[15] * grayScale[pixelPos-2+width];
smoothened_1 += filterCuda[16] * grayScale[pixelPos-1+width];
smoothened_2 += filterCuda[17] * grayScale[pixelPos+0+width];
smoothened_3 += filterCuda[18] * grayScale[pixelPos+1+width];
smoothened_4 += filterCuda[19] * grayScale[pixelPos+2+width];
smoothened_0 += filterCuda[20] * grayScale[pixelPos-2+width2pos];
smoothened_1 += filterCuda[21] * grayScale[pixelPos-1+width2pos];
smoothened_2 += filterCuda[22] * grayScale[pixelPos+0+width2pos];
smoothened_3 += filterCuda[23] * grayScale[pixelPos+1+width2pos];
smoothened_4 += filterCuda[24] * grayScale[pixelPos+2+width2pos];
smoothened_0 = smoothened_0 + smoothened_1;
smoothened_3 = smoothened_3 + smoothened_4;
smoothened_0 += smoothened_2 + smoothened_3;
smoothened_0/=filter_sum;
smoothened[pixelPos] = smoothened_0;
}
}
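/* In the unrolled path above, the five accumulators smoothened_0..smoothened_4
   each gather one column of the 5x5 stencil (filter indices 0, 5, 10, 15 and 20
   feed smoothened_0, and so on), keeping the partial sums independent until the
   final combination and division by filter_sum. */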
void triangularSmoothCuda(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height)
{
unsigned char *cudaImGray, *cudaEnhanced;
unsigned int xGridDim = 0, yGridDim = 1;
unsigned int imageSize = width * height;
hipMalloc((void**)&cudaImGray, height*width*sizeof(unsigned char));
hipMalloc((void**)&cudaEnhanced, height*width*sizeof(unsigned char));
hipMemcpy(cudaImGray, grayImage, height*width*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemset(cudaEnhanced, 0, height*width*sizeof(unsigned char));
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Smooth the grayscale image on the GPU
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
hipLaunchKernelGGL(( triangularSmoothKernel), dim3(grid), dim3(block), 0, 0, cudaImGray, cudaEnhanced, width, height);
hipError_t err = hipMemcpy(smoothImage, cudaEnhanced ,height*width*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(cudaImGray);
hipFree(cudaEnhanced);
}
void triangularSmooth(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height,
const float *filter)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
for ( int fy = y - 2; fy < y + 3; fy++ )
{
for ( int fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += grayImage[(fy * width) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
smoothImage[(y * width) + x] = static_cast< unsigned char >(smoothPix);
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "triangularSmooth (cpu): \t" << kernelTime.getElapsed() << " seconds." << endl;
} | a22ab91d827daed961238a14cbac0e628b8a399c.cu | #include <Timer.hpp>
#include <iostream>
#include <iomanip>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
using LOFAR::NSTimer;
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
/* Utility function/macro, used to do error checking.
Use this function/macro like this:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
And to check the result of a kernel invocation:
checkCudaCall(cudaGetLastError());
*/
#define checkCudaCall(result) { \
if (result != cudaSuccess){ \
cerr << "cuda error: " << cudaGetErrorString(result); \
cerr << " in " << __FILE__ << " at line "<< __LINE__<<endl; \
exit(1); \
} \
}
__constant__ float filter_sum = 35.0f;
__constant__ float filterCuda[] = { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
1.0f, 2.0f, 2.0f, 2.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f, 1.0f
};
__global__ void rgb2grayCudaKernel(unsigned char *deviceImage, unsigned char *deviceResult, const int height, const int width){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int i = globalThreadNum;
if (i >= width * height) return; /* guard the surplus threads of the last block */
int grayPix = 0;
grayPix = (30*deviceImage[i] + 59 * deviceImage[(width * height) + i] + 11 * deviceImage[(2 * width * height) + i])/100;
deviceResult[i] = grayPix;
}
void rgb2grayCuda(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height) {
unsigned char *deviceImage;
unsigned char *deviceResult;
unsigned int imageSize = width * height;
int initialBytes = width * height * 3 *sizeof(unsigned char);
int endBytes = width * height * sizeof(unsigned char);
unsigned int xGridDim = 0, yGridDim=1;
cudaError_t err = cudaMalloc((void**) &deviceImage, initialBytes);
err = cudaMalloc((void**) &deviceResult, endBytes);
err = cudaMemset(deviceResult, 0, endBytes);
err = cudaMemset(deviceImage, 0, initialBytes);
err = cudaMemcpy(deviceImage, inputImage, initialBytes, cudaMemcpyHostToDevice);
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Convert the input image to grayscale
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
rgb2grayCudaKernel<<<grid, block>>>(deviceImage, deviceResult, height, width);
err = cudaDeviceSynchronize();
err = cudaMemcpy(grayImage, deviceResult, endBytes, cudaMemcpyDeviceToHost);
cudaFree(deviceImage);
cudaFree(deviceResult);
}
void rgb2gray(unsigned char *inputImage, unsigned char *grayImage, const int width, const int height)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
float grayPix = 0.0f;
float r = static_cast< float >(inputImage[(y * width) + x]);
float g = static_cast< float >(inputImage[(width * height) + (y * width) + x]);
float b = static_cast< float >(inputImage[(2 * width * height) + (y * width) + x]);
grayPix = (0.3f * r) + (0.59f * g) + (0.11f * b);
grayImage[(y * width) + x] = static_cast< unsigned char >(grayPix);
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "rgb2gray (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void histogram1DCudaKernel(unsigned char *grayImg, unsigned int *hist, const int no_of_bins, const int width, const int height){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int startIdxHist = globalThreadNum*no_of_bins;
int startIdxImg = globalThreadNum*width;
for(int i = startIdxImg; i < startIdxImg + width && i<width*height; i++) {
hist[startIdxHist+grayImg[i]]++;
}
}
__global__ void sumHistCuda(unsigned int *histArray, unsigned int *hist, const int no_of_bins, const int height, const int width){
/* calculate the global thread id*/
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
for(int i = 0; i < height; i++) {
hist[globalThreadNum] += histArray[i*no_of_bins+globalThreadNum];
}
}
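/* The two kernels above build one private 256-bin histogram per image row and
   then reduce them. A more compact alternative, shown only as a sketch (it is
   not used by this file), is a single kernel that updates one shared global
   histogram with atomics; it assumes hist points to 256 bins zeroed beforehand: */
__global__ void histogram1DAtomicKernel(const unsigned char *grayImg, unsigned int *hist, const int numPixels)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numPixels)
        atomicAdd(&hist[grayImg[i]], 1u);
}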
void histogram1DCuda(unsigned char *grayImage, unsigned char *histogramImage,const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int BAR_WIDTH)
{
unsigned int max = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
unsigned int *histArray;
unsigned int hist[256] = {0};
histArray = (unsigned int*)malloc(height*HISTOGRAM_SIZE*sizeof(unsigned int));
memset(histArray, 0, height*HISTOGRAM_SIZE*sizeof(unsigned int));
unsigned char *grayImgCuda;
unsigned int *histArrayComputedCuda;
unsigned int *histCuda;
unsigned int xGridDim = 0;
cudaMalloc((void **) &histArrayComputedCuda, height*HISTOGRAM_SIZE*sizeof(unsigned int));
cudaMemset(histArrayComputedCuda, 0, height*HISTOGRAM_SIZE*sizeof(unsigned int));
cudaMalloc((void **) &grayImgCuda, width*height*sizeof(unsigned char));
if(height%1024==0)
xGridDim = height / 1024;
else
xGridDim = height / 1024 + 1;
dim3 gridSize(xGridDim,1,1);
dim3 blockSize(32,32,1);
cudaMemcpy(grayImgCuda,grayImage,sizeof(unsigned char)*height*width,cudaMemcpyHostToDevice);
histogram1DCudaKernel<<<gridSize, blockSize>>>(grayImgCuda, histArrayComputedCuda, HISTOGRAM_SIZE, width, height);
cudaError err = cudaDeviceSynchronize();
err = cudaMemcpy(histArray, histArrayComputedCuda, height*HISTOGRAM_SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost);
err = cudaMalloc((void **)&histCuda,HISTOGRAM_SIZE*sizeof(unsigned int));
err = cudaMemset(histCuda, 0, HISTOGRAM_SIZE*sizeof(unsigned int));
dim3 gridSize2(1,1,1);
dim3 blockSize2(16,16,1);
//err = cudaMemcpy(histArrayComputedCuda, histArray, height*HISTOGRAM_SIZE*sizeof(unsigned int), cudaMemcpyHostToDevice);
sumHistCuda<<<gridSize2, blockSize2>>>(histArrayComputedCuda, histCuda, 256, height, width); /* one 16x16 block: one thread per bin */
err = cudaDeviceSynchronize();
err = cudaMemcpy(histogram, histCuda, HISTOGRAM_SIZE*sizeof(unsigned int), cudaMemcpyDeviceToHost);
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > max )
{
max = histogram[i];
}
}
for ( unsigned int x = 0; x < HISTOGRAM_SIZE * BAR_WIDTH; x += BAR_WIDTH )
{
unsigned int value = HISTOGRAM_SIZE - ((histogram[x / BAR_WIDTH] * HISTOGRAM_SIZE) / max);
for ( unsigned int y = 0; y < value; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 0;
}
}
for ( unsigned int y = value; y < HISTOGRAM_SIZE; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 255;
}
}
}
//cout << fixed << setprecision(6);
//cout << "histogram1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
cudaFree(grayImgCuda);
cudaFree(histArrayComputedCuda);
cudaFree(histCuda);
free(histArray); /* host staging copy of the per-row histograms */
}
void histogram1D(unsigned char *grayImage, unsigned char *histogramImage,const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int BAR_WIDTH)
{
unsigned int max = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
memset(reinterpret_cast< void * >(histogram), 0, HISTOGRAM_SIZE * sizeof(unsigned int));
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
histogram[static_cast< unsigned int >(grayImage[(y * width) + x])] += 1;
}
}
// /Kernel
kernelTime.stop();
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > max )
{
max = histogram[i];
}
}
for ( unsigned int x = 0; x < HISTOGRAM_SIZE * BAR_WIDTH; x += BAR_WIDTH )
{
unsigned int value = HISTOGRAM_SIZE - ((histogram[x / BAR_WIDTH] * HISTOGRAM_SIZE) / max);
for ( unsigned int y = 0; y < value; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 0;
}
}
for ( unsigned int y = value; y < HISTOGRAM_SIZE; y++ )
{
for ( unsigned int i = 0; i < BAR_WIDTH; i++ )
{
histogramImage[(y * HISTOGRAM_SIZE * BAR_WIDTH) + x + i] = 255;
}
}
}
//cout << fixed << setprecision(6);
//cout << "histogram1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void contrast1DCudaKernel(unsigned char *deviceImage, unsigned char *deviceResult, const int height, const int width,
unsigned int min, unsigned int max, float diff)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int i = globalThreadNum;
if (i >= width * height) return; /* guard the surplus threads of the last block */
unsigned int grayPix = static_cast< unsigned int >(deviceImage[i]);
if ( grayPix < min )
{
grayPix = 0;
}
else if ( grayPix > max )
{
grayPix = 255;
}
else
{
grayPix = (255 * (grayPix - min) / diff);
}
deviceResult[i] = static_cast< unsigned char > (grayPix);
}
void contrast1DCuda(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD)
{
unsigned char *deviceImage;
unsigned char *deviceResult;
unsigned int imageSize = width * height;
int numBytes = width * height * sizeof(unsigned char);
unsigned int i = 0, xGridDim = 0, yGridDim = 1;
unsigned int maxHist = 0;
for ( unsigned int i = 0; i < HISTOGRAM_SIZE; i++ )
{
if ( histogram[i] > maxHist )
{
maxHist = histogram[i];
}
}
i=0;
while ( (i < HISTOGRAM_SIZE) && ((histogram[i]*HISTOGRAM_SIZE)/maxHist < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
cudaMalloc((void**) &deviceImage, numBytes);
cudaMalloc((void**) &deviceResult, numBytes);
cudaMemset(deviceResult, 0, numBytes);
cudaMemset(deviceImage, 0, numBytes);
cudaError_t err = cudaMemcpy(deviceImage, grayImage, numBytes, cudaMemcpyHostToDevice);
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Set up the launch configuration
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
// Stretch the contrast of the grayscale image on the GPU
contrast1DCudaKernel<<<grid, block>>>(deviceImage, deviceResult, height, width, min, max, diff);
cudaDeviceSynchronize();
cudaMemcpy(grayImage, deviceResult, numBytes, cudaMemcpyDeviceToHost);
cudaFree(deviceImage);
cudaFree(deviceResult);
}
void contrast1D(unsigned char *grayImage, const int width, const int height,
unsigned int *histogram, const unsigned int HISTOGRAM_SIZE,
const unsigned int CONTRAST_THRESHOLD)
{
unsigned int i = 0;
unsigned int maxHist = 0;
NSTimer kernelTime = NSTimer("kernelTime", false, false);
for ( unsigned int j = 0; j < HISTOGRAM_SIZE; j++ )
{
if ( histogram[j] > maxHist )
{
maxHist = histogram[j];
}
}
while ( (i < HISTOGRAM_SIZE) && ((histogram[i]*HISTOGRAM_SIZE/maxHist) < CONTRAST_THRESHOLD) )
{
i++;
}
unsigned int min = i;
i = HISTOGRAM_SIZE - 1;
while ( (i > min) && (histogram[i] < CONTRAST_THRESHOLD) )
{
i--;
}
unsigned int max = i;
float diff = max - min;
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for (int x = 0; x < width; x++ )
{
unsigned char pixel = grayImage[(y * width) + x];
if ( pixel < min )
{
pixel = 0;
}
else if ( pixel > max )
{
pixel = 255;
}
else
{
pixel = static_cast< unsigned char >(255.0f * (pixel - min) / diff);
}
grayImage[(y * width) + x] = pixel;
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "contrast1D (cpu): \t\t" << kernelTime.getElapsed() << " seconds." << endl;
}
__global__ void triangularSmoothKernel(unsigned char *grayScale, unsigned char *smoothened, unsigned int width, unsigned int height)
{
int threadsPerBlock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int globalThreadNum = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int pixelPos = globalThreadNum;
int modWidth = pixelPos%width;
int modHeight = (pixelPos/width);
int x, y;
float smoothened_0 = 0, smoothened_1 = 0, smoothened_2 = 0, smoothened_3 = 0, smoothened_4 = 0, smoothened_f = 0;
bool unrollFlag = true;
int width2pos = 2*width, width2minus = -width2pos;
int x_start = 0, x_end = 5, y_start = 0, y_end = 5;
if(pixelPos >= width * height)
return;
if((modWidth < 2) || (modWidth > width - 3)) {
unrollFlag = false;
if(modWidth < 2)
x_start = 2 - modWidth;
else
x_end = 2 + width - modWidth;
}
if((modHeight < 2) || (modHeight > height - 3)) {
unrollFlag = false;
if(modHeight < 2)
y_start = 2 - modHeight;
else
y_end = 2 + height - modHeight;
}
if(!unrollFlag){
float el_sum = 0;
for(y = y_start; y < y_end; y++){
for(x = x_start; x < x_end; x++) {
smoothened_f += filterCuda[5*y+x] * grayScale[pixelPos+x-2+(y-2)*width];
el_sum += filterCuda[5*y+x];
}
}
smoothened_f/=el_sum;
smoothened[pixelPos] = smoothened_f;
}
else {
smoothened_0 += filterCuda[0] * grayScale[pixelPos-2+width2minus];
smoothened_1 += filterCuda[1] * grayScale[pixelPos-1+width2minus];
smoothened_2 += filterCuda[2] * grayScale[pixelPos+0+width2minus];
smoothened_3 += filterCuda[3] * grayScale[pixelPos+1+width2minus];
smoothened_4 += filterCuda[4] * grayScale[pixelPos+2+width2minus];
smoothened_0 += filterCuda[5] * grayScale[pixelPos-2-width];
smoothened_1 += filterCuda[6] * grayScale[pixelPos-1-width];
smoothened_2 += filterCuda[7] * grayScale[pixelPos+0-width];
smoothened_3 += filterCuda[8] * grayScale[pixelPos+1-width];
smoothened_4 += filterCuda[9] * grayScale[pixelPos+2-width];
smoothened_0 += filterCuda[10] * grayScale[pixelPos-2];
smoothened_1 += filterCuda[11] * grayScale[pixelPos-1];
smoothened_2 += filterCuda[12] * grayScale[pixelPos+0];
smoothened_3 += filterCuda[13] * grayScale[pixelPos+1];
smoothened_4 += filterCuda[14] * grayScale[pixelPos+2];
smoothened_0 += filterCuda[15] * grayScale[pixelPos-2+width];
smoothened_1 += filterCuda[16] * grayScale[pixelPos-1+width];
smoothened_2 += filterCuda[17] * grayScale[pixelPos+0+width];
smoothened_3 += filterCuda[18] * grayScale[pixelPos+1+width];
smoothened_4 += filterCuda[19] * grayScale[pixelPos+2+width];
smoothened_0 += filterCuda[20] * grayScale[pixelPos-2+width2pos];
smoothened_1 += filterCuda[21] * grayScale[pixelPos-1+width2pos];
smoothened_2 += filterCuda[22] * grayScale[pixelPos+0+width2pos];
smoothened_3 += filterCuda[23] * grayScale[pixelPos+1+width2pos];
smoothened_4 += filterCuda[24] * grayScale[pixelPos+2+width2pos];
smoothened_0 = smoothened_0 + smoothened_1;
smoothened_3 = smoothened_3 + smoothened_4;
smoothened_0 += smoothened_2 + smoothened_3;
smoothened_0/=filter_sum;
smoothened[pixelPos] = smoothened_0;
}
}
void triangularSmoothCuda(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height)
{
unsigned char *cudaImGray, *cudaEnhanced;
unsigned int xGridDim = 0, yGridDim = 1;
unsigned int imageSize = width * height;
cudaMalloc((void**)&cudaImGray, height*width*sizeof(unsigned char));
cudaMalloc((void**)&cudaEnhanced, height*width*sizeof(unsigned char));
cudaMemcpy(cudaImGray, grayImage, height*width*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemset(cudaEnhanced, 0, height*width*sizeof(unsigned char));
if(imageSize >= 8192*8192)
if(imageSize%(8192*8192-1)==0)
yGridDim = imageSize / (8192*8192-1);
else
yGridDim = imageSize / (8192*8192-1)+ 1;
if(imageSize%1024==0)
xGridDim = width*height / (1024*yGridDim);
else
xGridDim = width*height / (1024*yGridDim) + 1;
// Smooth the grayscale image on the GPU
dim3 grid(xGridDim,yGridDim,1);
dim3 block(32,32,1);
triangularSmoothKernel<<<grid, block>>> (cudaImGray, cudaEnhanced, width, height);
cudaError err = cudaMemcpy(smoothImage, cudaEnhanced ,height*width*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(cudaImGray);
cudaFree(cudaEnhanced);
}
void triangularSmooth(unsigned char *grayImage, unsigned char *smoothImage, const int width, const int height,
const float *filter)
{
NSTimer kernelTime = NSTimer("kernelTime", false, false);
kernelTime.start();
// Kernel
for ( int y = 0; y < height; y++ )
{
for ( int x = 0; x < width; x++ )
{
unsigned int filterItem = 0;
float filterSum = 0.0f;
float smoothPix = 0.0f;
for ( int fy = y - 2; fy < y + 3; fy++ )
{
for ( int fx = x - 2; fx < x + 3; fx++ )
{
if ( ((fy < 0) || (fy >= height)) || ((fx < 0) || (fx >= width)) )
{
filterItem++;
continue;
}
smoothPix += grayImage[(fy * width) + fx] * filter[filterItem];
filterSum += filter[filterItem];
filterItem++;
}
}
smoothPix /= filterSum;
smoothImage[(y * width) + x] = static_cast< unsigned char >(smoothPix);
}
}
// /Kernel
kernelTime.stop();
//cout << fixed << setprecision(6);
//cout << "triangularSmooth (cpu): \t" << kernelTime.getElapsed() << " seconds." << endl;
} |
5fa603d93f36271dd4545829e6ed5a4f6be034de.hip | // !!! This is a file automatically generated by hipify!!!
/*! \file main.c
\brief Main program of the solar system simulator (GPU version).
\author Peter C. Chapin <[email protected]>
LICENSE
This program is free software; you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if
not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include "global.h"
#include "Initialize.h"
#include "Timer.h"
#define STEPS_PER_YEAR 8766 // Number of hours in a year.
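/* 8766 = 365.25 * 24, the number of hours in a Julian year, so the year
   counter in main below assumes one-hour time steps. */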
int main( int argc, char **argv )
{
Timer stopwatch;
long long total_steps = 0;
int total_years = 0;
int return_code = EXIT_SUCCESS;
Object *dev_object_array; // Pointer to object array on GPU.
ObjectDynamics *dev_current_dynamics; // Pointer to current dynamics array on GPU.
ObjectDynamics *dev_next_dynamics; // Pointer to next dynamics array on GPU.
initialize_object_arrays( );
// dump_dynamics( );
Timer_initialize( &stopwatch );
Timer_start( &stopwatch );
// Allocate memory on GPU.
// TODO: Add error checking!
hipMalloc( (void**)&dev_object_array, OBJECT_COUNT*sizeof(Object) );
hipMalloc( (void**)&dev_current_dynamics, OBJECT_COUNT*sizeof(ObjectDynamics) );
hipMalloc( (void**)&dev_next_dynamics, OBJECT_COUNT*sizeof(ObjectDynamics) );
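/* One way to honour the TODO above (sketch only; rc is not a variable in this
   file) would be to test each allocation:
   hipError_t rc = hipMalloc( (void**)&dev_object_array, OBJECT_COUNT*sizeof(Object) );
   if( rc != hipSuccess ) {
   fprintf( stderr, "hipMalloc failed: %s\n", hipGetErrorString( rc ) );
   return EXIT_FAILURE;
   }
*/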
// Copy the object array to the GPU. This never changes so this only needs to be done once.
hipMemcpy( dev_object_array,
object_array,
OBJECT_COUNT*sizeof(Object),
hipMemcpyHostToDevice );
// Maybe copy the dynamics arrays once and leave them on the device...? See note in Object.cu.
//
//hipMemcpy( dev_current_dynamics,
// current_dynamics,
// OBJECT_COUNT*sizeof(ObjectDynamics),
// hipMemcpyHostToDevice );
while (1) {
cuda_time_step( dev_object_array, dev_current_dynamics, dev_next_dynamics );
total_steps++;
// Print out a message after 100 steps just to give the user something to see.
if( total_steps % 100 == 0 )
printf( "STEP %4lld\n", total_steps );
if( total_steps % STEPS_PER_YEAR == 0 ) {
total_years++;
if( total_years % 10 == 0 ) {
printf( "Years simulated = %d\r", total_years );
fflush( stdout );
}
// For now, stop the simulation after 1 year.
if( total_years == 1 ) break;
}
}
//hipMemcpy( current_dynamics,
// dev_next_dynamics,
// OBJECT_COUNT*sizeof(ObjectDynamics),
// hipMemcpyDeviceToHost );
// Deallocate memory on GPU.
hipFree( dev_object_array );
hipFree( dev_current_dynamics );
hipFree( dev_next_dynamics );
Timer_stop( &stopwatch );
dump_dynamics( );
printf( "Time elapsed = %ld milliseconds\n", Timer_time( &stopwatch ) );
return return_code;
}
| 5fa603d93f36271dd4545829e6ed5a4f6be034de.cu | /*! \file main.c
\brief Main program of the solar system simulator (GPU version).
\author Peter C. Chapin <[email protected]>
LICENSE
This program is free software; you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if
not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA
*/
#include <stdio.h>
#include <stdlib.h>
#include "global.h"
#include "Initialize.h"
#include "Timer.h"
#define STEPS_PER_YEAR 8766 // Number of hours in a year.
int main( int argc, char **argv )
{
Timer stopwatch;
long long total_steps = 0;
int total_years = 0;
int return_code = EXIT_SUCCESS;
Object *dev_object_array; // Pointer to object array on GPU.
ObjectDynamics *dev_current_dynamics; // Pointer to current dynamics array on GPU.
ObjectDynamics *dev_next_dynamics; // Pointer to next dynamics array on GPU.
initialize_object_arrays( );
// dump_dynamics( );
Timer_initialize( &stopwatch );
Timer_start( &stopwatch );
// Allocate memory on GPU.
// TODO: Add error checking!
cudaMalloc( (void**)&dev_object_array, OBJECT_COUNT*sizeof(Object) );
cudaMalloc( (void**)&dev_current_dynamics, OBJECT_COUNT*sizeof(ObjectDynamics) );
cudaMalloc( (void**)&dev_next_dynamics, OBJECT_COUNT*sizeof(ObjectDynamics) );
// Copy the object array to the GPU. This never changes so this only needs to be done once.
cudaMemcpy( dev_object_array,
object_array,
OBJECT_COUNT*sizeof(Object),
cudaMemcpyHostToDevice );
// Maybe copy the dynamics arrays once and leave them on the device...? See note in Object.cu.
//
//cudaMemcpy( dev_current_dynamics,
// current_dynamics,
// OBJECT_COUNT*sizeof(ObjectDynamics),
// cudaMemcpyHostToDevice );
while (1) {
cuda_time_step( dev_object_array, dev_current_dynamics, dev_next_dynamics );
total_steps++;
// Print out a message after 100 steps just to give the user something to see.
if( total_steps % 100 == 0 )
printf( "STEP %4lld\n", total_steps );
if( total_steps % STEPS_PER_YEAR == 0 ) {
total_years++;
if( total_years % 10 == 0 ) {
printf( "Years simulated = %d\r", total_years );
fflush( stdout );
}
// For now, stop the simulation after 1 year.
if( total_years == 1 ) break;
}
}
//cudaMemcpy( current_dynamics,
// dev_next_dynamics,
// OBJECT_COUNT*sizeof(ObjectDynamics),
// cudaMemcpyDeviceToHost );
// Deallocate memory on GPU.
cudaFree( dev_object_array );
cudaFree( dev_current_dynamics );
cudaFree( dev_next_dynamics );
Timer_stop( &stopwatch );
dump_dynamics( );
printf( "Time elapsed = %ld milliseconds\n", Timer_time( &stopwatch ) );
return return_code;
}
|
a10003389f773c1f2183647add7e14cfc15b220c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem, with the program for
* its solution by CVODE. The problem is the semi-discrete
* form of the advection-diffusion equation in 1-D:
* du/dt = d^2 u / dx^2 + .5 du/dx
* on the interval 0 <= x <= 2, and the time interval 0 <= t <= 5.
* Homogeneous Dirichlet boundary conditions are posed, and the
* initial condition is the following:
* u(x,t=0) = x(2-x)exp(2x) .
* The PDE is discretized on a uniform grid of size MX+2 with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX.
* This program solves the problem with the ADAMS integration method,
* and with Newton iteration using diagonal approximate Jacobians.
* It can use scalar (default) relative and absolute tolerances or a
* vector of absolute tolerances (controlled by a runtime argument).
* The constraint u_i >= 0 is posed for all components.
* Output is printed at t = .5, 1.0, ..., 5.
* Run statistics (optional outputs) are printed at the end.
*
* ./cvAdvDiff_diag_cuda [0 (scalar atol) | 1 (vector atol)]
* [0 (unfused) | 1 (fused)]
* -----------------------------------------------------------------
*/
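/* Discretization sketch implied by the header above: for an interior mesh point
 * i with spacing dx, the semi-discrete ODE evaluated by f_kernel below is
 * du_i/dt = (u_{i-1} - 2*u_i + u_{i+1})/dx^2 + 0.5*(u_{i+1} - u_{i-1})/(2*dx),
 * which is why hdcoef = 1/dx^2 and hacoef = 0.5/(2*dx) in UserData.
 */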
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <cvode/cvode_diag.h> /* prototypes for CVODE diagonal solver */
#include <nvector/nvector_cuda.h> /* access to cuda N_Vector */
#include <sundials/sundials_types.h> /* definition of type realtype */
/* Problem Constants */
#define ZERO RCONST(0.0)
#define XMAX RCONST(2.0) /* domain boundary */
#define MX 10 /* mesh dimension */
#define NEQ MX /* number of equations */
#define ATOL RCONST(1e-10) /* scalar absolute tolerance */
#define T0 ZERO /* initial time */
#define T1 RCONST(0.5) /* first output time */
#define DTOUT RCONST(0.5) /* output time increment */
#define NOUT 10 /* number of output times */
/* Type : UserData
contains mesh spacing and problem parameters. */
typedef struct {
realtype dx;
realtype hdcoef;
realtype hacoef;
} *UserData;
/* Private Helper Functions */
static void SetIC(N_Vector u, realtype dx);
static void PrintIntro(int toltype, int usefused);
static void PrintData(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/***************************** Main Program ******************************/
int main(int argc, char *argv[])
{
realtype dx, reltol, abstol, t, tout, umax;
N_Vector u;
UserData data;
void *cvode_mem;
int iout, retval, toltype, usefused;
long int nst;
u = NULL;
data = NULL;
cvode_mem = NULL;
toltype = 0;
usefused = 0;
if (argc >= 2) {
/* use vector or scalar atol? */
toltype = atoi(argv[1]);
/* use fused operations? */
if (argc == 3)
usefused = atoi(argv[2]);
}
data = (UserData) malloc(sizeof *data); /* Allocate data memory */
if(check_retval((void *)data, "malloc", 2)) return 1;
u = N_VNew_Cuda(NEQ); /* Allocate u vector */
if(check_retval((void *)u, "N_VNew", 0)) return 1;
reltol = ZERO; /* Set the tolerances */
abstol = ATOL;
dx = data->dx = XMAX/((realtype)(MX+1)); /* Set grid coefficients in data */
data->hdcoef = RCONST(1.0)/(dx*dx);
data->hacoef = RCONST(0.5)/(RCONST(2.0)*dx);
SetIC(u, dx); /* Initialize u vector */
/* Call CVodeCreate to create the solver memory and specify the
* Adams-Moulton LMM */
cvode_mem = CVodeCreate(CV_ADAMS);
if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return 1;
retval = CVodeSetUserData(cvode_mem, data);
if(check_retval(&retval, "CVodeSetUserData", 1)) return 1;
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the inital time T0, and
* the initial dependent variable vector u. */
retval = CVodeInit(cvode_mem, f, T0, u);
if(check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerances */
if (toltype == 0) {
retval = CVodeSStolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);
} else {
N_Vector vabstol = N_VClone_Cuda(u);
if (check_retval(&vabstol, "N_VClone_Cuda", 0)) return(1);
N_VConst(abstol, vabstol);
retval = CVodeSVtolerances(cvode_mem, reltol, vabstol);
if (check_retval(&retval, "CVodeSVtolerances", 1)) return(1);
N_VDestroy(vabstol);
}
/* Call CVDiag to create and attach CVODE-specific diagonal linear solver */
retval = CVDiag(cvode_mem);
if(check_retval(&retval, "CVDiag", 1)) return(1);
/* Tell CVode to use fused kernels if they are available. */
retval = CVodeSetUseIntegratorFusedKernels(cvode_mem, usefused);
check_retval(&retval, "CVodeSetUseIntegratorFusedKernels", 1);
PrintIntro(toltype, usefused);
umax = N_VMaxNorm(u);
t = T0;
PrintData(t, umax, 0);
/* In loop over output points, call CVode, print results, test for error */
for (iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
if(check_retval(&retval, "CVode", 1)) break;
umax = N_VMaxNorm(u);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
PrintData(t, umax, nst);
}
PrintFinalStats(cvode_mem); /* Print some final statistics */
N_VDestroy(u); /* Free the u vector */
CVodeFree(&cvode_mem); /* Free the integrator memory */
free(data); /* Free user data */
return(0);
}
/************************ Private Helper Functions ***********************/
/* Set initial conditions in u vector */
static void SetIC(N_Vector u, realtype dx)
{
int i;
sunindextype N;
realtype x;
realtype *udata;
/* Set pointer to data array and get local length of u. */
udata = N_VGetHostArrayPointer_Cuda(u);
N = N_VGetLength(u);
/* Load initial profile into u vector */
for (i=1; i<=N; i++) {
x = i*dx;
udata[i-1] = x*(XMAX - x)*exp(RCONST(2.0)*x);
}
N_VCopyToDevice_Cuda(u);
}
/* Print problem introduction */
static void PrintIntro(int toltype, int usefused)
{
printf("\n 1-D advection-diffusion equation, mesh size =%3d \n", MX);
printf("\n Diagonal linear solver CVDiag \n");
if (usefused)
printf(" Using fused CVODE kernels \n");
if (toltype == 0)
printf(" Using scalar ATOL\n");
else
printf(" Using vector ATOL\n");
printf("\n");
return;
}
/* Print data */
static void PrintData(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst =%4ld \n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst =%4ld \n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst =%4ld \n", t, umax, nst);
#endif
return;
}
/* Print some final statistics located in the iopt array */
static void PrintFinalStats(void *cvode_mem)
{
long int nst, nfe, nni, ncfn, netf;
int retval;
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
printf("\nFinal Statistics: \n\n");
printf("nst = %-6ld nfe = %-6ld ", nst, nfe);
printf("nni = %-6ld ncfn = %-6ld netf = %ld\n \n", nni, ncfn, netf);
}
/***************** Function Called by the Solver ***********************/
/* f routine. Compute f(t,u). */
__global__
static void f_kernel(sunindextype N,
realtype hordc, realtype horac,
const realtype* u, realtype* udot)
{
sunindextype i = blockDim.x*blockIdx.x + threadIdx.x;
realtype ui, ult, urt, hdiff, hadv;
if (i < N) {
/* Extract u at x_i and two neighboring points */
ui = u[i];
ult = (i == 0) ? ZERO : u[i-1];
urt = (i == N-1) ? ZERO : u[i+1];
/* Set diffusion and advection terms and load into udot */
hdiff = hordc*(ult - RCONST(2.0)*ui + urt);
hadv = horac*(urt - ult);
udot[i] = hdiff + hadv;
}
}
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
realtype hordc, horac;
realtype *udata, *dudata;
sunindextype N;
size_t grid, block;
UserData data;
hipError_t cuerr;
udata = N_VGetDeviceArrayPointer_Cuda(u);
dudata = N_VGetDeviceArrayPointer_Cuda(udot);
/* Extract needed problem constants from data */
data = (UserData) user_data;
hordc = data->hdcoef;
horac = data->hacoef;
/* Extract parameters for parallel computation. */
N = N_VGetLength(u); /* Number of elements of u. */
block = 64;
grid = (block + N - 1)/block;
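/* ceiling division: with NEQ = 10 and block = 64 this gives grid = 1, and the
   i < N guard in f_kernel idles the surplus threads */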
hipLaunchKernelGGL(( f_kernel), dim3(grid), dim3(block), 0, 0, N, hordc, horac, udata, dudata);
hipDeviceSynchronize();
cuerr = hipGetLastError();
if (cuerr != hipSuccess) {
fprintf(stderr, "ERROR in f: f_kernel --> %s\n", hipGetErrorString(cuerr));
return(-1);
}
return(0);
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval < 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
return(1); }
return(0);
} | a10003389f773c1f2183647add7e14cfc15b220c.cu | /*
* -----------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example problem:
*
* The following is a simple example problem, with the program for
* its solution by CVODE. The problem is the semi-discrete
* form of the advection-diffusion equation in 1-D:
* du/dt = d^2 u / dx^2 + .5 du/dx
* on the interval 0 <= x <= 2, and the time interval 0 <= t <= 5.
* Homogeneous Dirichlet boundary conditions are posed, and the
* initial condition is the following:
* u(x,t=0) = x(2-x)exp(2x) .
* The PDE is discretized on a uniform grid of size MX+2 with
* central differencing, and with boundary values eliminated,
* leaving an ODE system of size NEQ = MX.
* This program solves the problem with the ADAMS integration method,
* and with Newton iteration using diagonal approximate Jacobians.
* It can use scalar (default) relative and absolute tolerances or a
* vector of absolute tolerances (controlled by a runtime argument).
* The constraint u_i >= 0 is posed for all components.
* Output is printed at t = .5, 1.0, ..., 5.
* Run statistics (optional outputs) are printed at the end.
*
* ./cvAdvDiff_diag_cuda [0 (scalar atol) | 1 (vector atol)]
* [0 (unfused) | 1 (fused)]
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <cvode/cvode_diag.h> /* prototypes for CVODE diagonal solver */
#include <nvector/nvector_cuda.h> /* access to cuda N_Vector */
#include <sundials/sundials_types.h> /* definition of type realtype */
/* Problem Constants */
#define ZERO RCONST(0.0)
#define XMAX RCONST(2.0) /* domain boundary */
#define MX 10 /* mesh dimension */
#define NEQ MX /* number of equations */
#define ATOL RCONST(1e-10) /* scalar absolute tolerance */
#define T0 ZERO /* initial time */
#define T1 RCONST(0.5) /* first output time */
#define DTOUT RCONST(0.5) /* output time increment */
#define NOUT 10 /* number of output times */
/* Type : UserData
contains mesh spacing and problem parameters. */
typedef struct {
realtype dx;
realtype hdcoef;
realtype hacoef;
} *UserData;
/* Private Helper Functions */
static void SetIC(N_Vector u, realtype dx);
static void PrintIntro(int toltype, int usefused);
static void PrintData(realtype t, realtype umax, long int nst);
static void PrintFinalStats(void *cvode_mem);
/* Functions Called by the Solver */
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/***************************** Main Program ******************************/
int main(int argc, char *argv[])
{
realtype dx, reltol, abstol, t, tout, umax;
N_Vector u;
UserData data;
void *cvode_mem;
int iout, retval, toltype, usefused;
long int nst;
u = NULL;
data = NULL;
cvode_mem = NULL;
toltype = 0;
usefused = 0;
if (argc >= 2) {
/* use vector or scalar atol? */
toltype = atoi(argv[1]);
/* use fused operations? */
if (argc == 3)
usefused = atoi(argv[2]);
}
data = (UserData) malloc(sizeof *data); /* Allocate data memory */
if(check_retval((void *)data, "malloc", 2)) return 1;
u = N_VNew_Cuda(NEQ); /* Allocate u vector */
if(check_retval((void *)u, "N_VNew", 0)) return 1;
reltol = ZERO; /* Set the tolerances */
abstol = ATOL;
dx = data->dx = XMAX/((realtype)(MX+1)); /* Set grid coefficients in data */
data->hdcoef = RCONST(1.0)/(dx*dx);
data->hacoef = RCONST(0.5)/(RCONST(2.0)*dx);
SetIC(u, dx); /* Initialize u vector */
/* Call CVodeCreate to create the solver memory and specify the
* Adams-Moulton LMM */
cvode_mem = CVodeCreate(CV_ADAMS);
if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return 1;
retval = CVodeSetUserData(cvode_mem, data);
if(check_retval(&retval, "CVodeSetUserData", 1)) return 1;
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the inital time T0, and
* the initial dependent variable vector u. */
retval = CVodeInit(cvode_mem, f, T0, u);
if(check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerances */
if (toltype == 0) {
retval = CVodeSStolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSStolerances", 1)) return(1);
} else {
N_Vector vabstol = N_VClone_Cuda(u);
if (check_retval(&vabstol, "N_VClone_Cuda", 0)) return(1);
N_VConst(abstol, vabstol);
retval = CVodeSVtolerances(cvode_mem, reltol, vabstol);
if (check_retval(&retval, "CVodeSVtolerances", 1)) return(1);
N_VDestroy(vabstol);
}
/* Call CVDiag to create and attach CVODE-specific diagonal linear solver */
retval = CVDiag(cvode_mem);
if(check_retval(&retval, "CVDiag", 1)) return(1);
/* Tell CVode to use fused kernels if they are available. */
retval = CVodeSetUseIntegratorFusedKernels(cvode_mem, usefused);
check_retval(&retval, "CVodeSetUseIntegratorFusedKernels", 1);
PrintIntro(toltype, usefused);
umax = N_VMaxNorm(u);
t = T0;
PrintData(t, umax, 0);
/* In loop over output points, call CVode, print results, test for error */
for (iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) {
retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL);
if(check_retval(&retval, "CVode", 1)) break;
umax = N_VMaxNorm(u);
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
PrintData(t, umax, nst);
}
PrintFinalStats(cvode_mem); /* Print some final statistics */
N_VDestroy(u); /* Free the u vector */
CVodeFree(&cvode_mem); /* Free the integrator memory */
free(data); /* Free user data */
return(0);
}
/************************ Private Helper Functions ***********************/
/* Set initial conditions in u vector */
static void SetIC(N_Vector u, realtype dx)
{
int i;
sunindextype N;
realtype x;
realtype *udata;
/* Set pointer to data array and get local length of u. */
udata = N_VGetHostArrayPointer_Cuda(u);
N = N_VGetLength(u);
/* Load initial profile into u vector */
for (i=1; i<=N; i++) {
x = i*dx;
udata[i-1] = x*(XMAX - x)*exp(RCONST(2.0)*x);
}
N_VCopyToDevice_Cuda(u);
}
/* Print problem introduction */
static void PrintIntro(int toltype, int usefused)
{
printf("\n 1-D advection-diffusion equation, mesh size =%3d \n", MX);
printf("\n Diagonal linear solver CVDiag \n");
if (usefused)
printf(" Using fused CVODE kernels \n");
if (toltype == 0)
printf(" Using scalar ATOL\n");
else
printf(" Using vector ATOL\n");
printf("\n");
return;
}
/* Print data */
static void PrintData(realtype t, realtype umax, long int nst)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %4.2Lf max.norm(u) =%14.6Le nst =%4ld \n", t, umax, nst);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %4.2f max.norm(u) =%14.6e nst =%4ld \n", t, umax, nst);
#else
printf("At t = %4.2f max.norm(u) =%14.6e nst =%4ld \n", t, umax, nst);
#endif
return;
}
/* Print some final statistics located in the iopt array */
static void PrintFinalStats(void *cvode_mem)
{
long int nst, nfe, nni, ncfn, netf;
int retval;
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
printf("\nFinal Statistics: \n\n");
printf("nst = %-6ld nfe = %-6ld ", nst, nfe);
printf("nni = %-6ld ncfn = %-6ld netf = %ld\n \n", nni, ncfn, netf);
}
/***************** Function Called by the Solver ***********************/
/* f routine. Compute f(t,u). */
__global__
static void f_kernel(sunindextype N,
realtype hordc, realtype horac,
const realtype* u, realtype* udot)
{
sunindextype i = blockDim.x*blockIdx.x + threadIdx.x;
realtype ui, ult, urt, hdiff, hadv;
if (i < N) {
/* Extract u at x_i and two neighboring points */
ui = u[i];
ult = (i == 0) ? ZERO : u[i-1];
urt = (i == N-1) ? ZERO : u[i+1];
/* Set diffusion and advection terms and load into udot */
hdiff = hordc*(ult - RCONST(2.0)*ui + urt);
hadv = horac*(urt - ult);
udot[i] = hdiff + hadv;
}
}
static int f(realtype t, N_Vector u, N_Vector udot, void *user_data)
{
realtype hordc, horac;
realtype *udata, *dudata;
sunindextype N;
size_t grid, block;
UserData data;
cudaError_t cuerr;
udata = N_VGetDeviceArrayPointer_Cuda(u);
dudata = N_VGetDeviceArrayPointer_Cuda(udot);
/* Extract needed problem constants from data */
data = (UserData) user_data;
hordc = data->hdcoef;
horac = data->hacoef;
/* Extract parameters for parallel computation. */
N = N_VGetLength(u); /* Number of elements of u. */
block = 64;
grid = (block + N - 1)/block;
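  /* Launch configuration: 64-thread blocks, grid size rounded up (ceiling division)
     so that every one of the N elements of u is covered by a thread. */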
f_kernel<<<grid, block>>>(N, hordc, horac, udata, dudata);
cudaDeviceSynchronize();
cuerr = cudaGetLastError();
if (cuerr != cudaSuccess) {
fprintf(stderr, "ERROR in f: f_kernel --> %s\n", cudaGetErrorString(cuerr));
return(-1);
}
return(0);
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns an integer value so check if
retval < 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname);
return(1); }
return(0);
} |
67d07c801874e9413b5c7858109dcdbb16dbf0aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// UIUC IMPACT
#include "parboil.h"
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "gpu_info.h"
#include "spmv_jds.h"
#include "jds_kernels.cu"
#include "convert_dataset.h"
/*
static int generate_vector(float *x_vector, int dim) {
srand(54321);
for(int i=0;i<dim;i++) {
x_vector[i] = (rand() / (float) RAND_MAX);
}
return 0;
}
*/
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
fprintf(stderr, "Expecting one two filenames\n");
exit(-1);
}
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
int len;
int depth;
int dim;
int pad=32;
int nzcnt_len;
float *h_data;
int *h_indices;
int *h_ptr;
int *h_perm;
int *h_nzcnt;
float *h_Ax_vector;
float *h_x_vector;
float *d_data;
int *d_indices;
int *d_ptr;
int *d_perm;
int *d_nzcnt;
float *d_Ax_vector;
float *d_x_vector;
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int col_count;
// printf("Input file %s\n", parameters->inpFiles[0]);
coo_to_jds(
parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
1, // row padding
pad, // warp size
1, // pack size
1, // is mirrored?
0, // binary matrix
0, // debug level [0:2]
&h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm,
&col_count, &dim, &len, &nzcnt_len, &depth
);
h_Ax_vector=(float*)malloc(sizeof(float)*dim);
h_x_vector=(float*)malloc(sizeof(float)*dim);
input_vec( parameters->inpFiles[1],h_x_vector,dim);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMalloc((void **)&d_data, len*sizeof(float));
hipMalloc((void **)&d_indices, len*sizeof(int));
hipMalloc((void **)&d_ptr, depth*sizeof(int));
hipMalloc((void **)&d_perm, dim*sizeof(int));
hipMalloc((void **)&d_nzcnt, nzcnt_len*sizeof(int));
hipMalloc((void **)&d_x_vector, dim*sizeof(float));
hipMalloc((void **)&d_Ax_vector,dim*sizeof(float));
hipMemset( (void *) d_Ax_vector, 0, dim*sizeof(float));
hipMemcpy(d_data, h_data, len*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_indices, h_indices, len*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_perm, h_perm, dim*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(d_x_vector, h_x_vector, dim*sizeof(float), hipMemcpyHostToDevice);
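  // jds_ptr_int and sh_zcnt_int are symbols defined in jds_kernels.cu (presumably __constant__ arrays);
  // stage the JDS row-start offsets and per-row nonzero counts there for the kernel to read.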
hipMemcpyToSymbol(jds_ptr_int, h_ptr, depth*sizeof(int));
hipMemcpyToSymbol(sh_zcnt_int, h_nzcnt,nzcnt_len*sizeof(int));
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
unsigned int grid;
unsigned int block;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
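  // the compute_active_thread helper picks a grid/block size from nzcnt_len, the padding,
  // and the device's compute capability, warp size and multiprocessor count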
compute_active_thread(&block, &grid,nzcnt_len,pad, deviceProp.major,deviceProp.minor,
deviceProp.warpSize,deviceProp.multiProcessorCount);
// hipFuncSetCacheConfig(spmv_jds_naive, hipFuncCachePreferL1);
//main execution
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// for (int i=0; i<50; i++)
hipLaunchKernelGGL(( spmv_jds_naive), dim3(grid), dim3(block), 0, 0, d_Ax_vector, d_data, d_indices, d_perm, d_x_vector, d_nzcnt, dim);
CUERR // check and clear any existing errors
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipMemcpy(h_Ax_vector, d_Ax_vector,dim*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(d_data);
hipFree(d_indices);
hipFree(d_ptr);
hipFree(d_perm);
hipFree(d_nzcnt);
hipFree(d_x_vector);
hipFree(d_Ax_vector);
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Ax_vector,dim);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free (h_data);
free (h_indices);
free (h_ptr);
free (h_perm);
free (h_nzcnt);
free (h_Ax_vector);
free (h_x_vector);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
| 67d07c801874e9413b5c7858109dcdbb16dbf0aa.cu | // UIUC IMPACT
#include "parboil.h"
#include <stdio.h>
#include <stdlib.h>
#include "file.h"
#include "gpu_info.h"
#include "spmv_jds.h"
#include "jds_kernels.cu"
#include "convert_dataset.h"
/*
static int generate_vector(float *x_vector, int dim) {
srand(54321);
for(int i=0;i<dim;i++) {
x_vector[i] = (rand() / (float) RAND_MAX);
}
return 0;
}
*/
int main(int argc, char** argv) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
fprintf(stderr, "Expecting one two filenames\n");
exit(-1);
}
pb_InitializeTimerSet(&timers);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
int len;
int depth;
int dim;
int pad=32;
int nzcnt_len;
float *h_data;
int *h_indices;
int *h_ptr;
int *h_perm;
int *h_nzcnt;
float *h_Ax_vector;
float *h_x_vector;
float *d_data;
int *d_indices;
int *d_ptr;
int *d_perm;
int *d_nzcnt;
float *d_Ax_vector;
float *d_x_vector;
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int col_count;
// printf("Input file %s\n", parameters->inpFiles[0]);
coo_to_jds(
parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
1, // row padding
pad, // warp size
1, // pack size
1, // is mirrored?
0, // binary matrix
0, // debug level [0:2]
&h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm,
&col_count, &dim, &len, &nzcnt_len, &depth
);
h_Ax_vector=(float*)malloc(sizeof(float)*dim);
h_x_vector=(float*)malloc(sizeof(float)*dim);
input_vec( parameters->inpFiles[1],h_x_vector,dim);
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMalloc((void **)&d_data, len*sizeof(float));
cudaMalloc((void **)&d_indices, len*sizeof(int));
cudaMalloc((void **)&d_ptr, depth*sizeof(int));
cudaMalloc((void **)&d_perm, dim*sizeof(int));
cudaMalloc((void **)&d_nzcnt, nzcnt_len*sizeof(int));
cudaMalloc((void **)&d_x_vector, dim*sizeof(float));
cudaMalloc((void **)&d_Ax_vector,dim*sizeof(float));
cudaMemset( (void *) d_Ax_vector, 0, dim*sizeof(float));
cudaMemcpy(d_data, h_data, len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_indices, h_indices, len*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_perm, h_perm, dim*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_x_vector, h_x_vector, dim*sizeof(float), cudaMemcpyHostToDevice);
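  // jds_ptr_int and sh_zcnt_int are symbols defined in jds_kernels.cu (presumably __constant__ arrays);
  // stage the JDS row-start offsets and per-row nonzero counts there for the kernel to read.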
cudaMemcpyToSymbol(jds_ptr_int, h_ptr, depth*sizeof(int));
cudaMemcpyToSymbol(sh_zcnt_int, h_nzcnt,nzcnt_len*sizeof(int));
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
unsigned int grid;
unsigned int block;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
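  // the compute_active_thread helper picks a grid/block size from nzcnt_len, the padding,
  // and the device's compute capability, warp size and multiprocessor count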
compute_active_thread(&block, &grid,nzcnt_len,pad, deviceProp.major,deviceProp.minor,
deviceProp.warpSize,deviceProp.multiProcessorCount);
// cudaFuncSetCacheConfig(spmv_jds_naive, cudaFuncCachePreferL1);
//main execution
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
// for (int i=0; i<50; i++)
spmv_jds_naive<<<grid, block>>>(d_Ax_vector, d_data, d_indices, d_perm, d_x_vector, d_nzcnt, dim);
CUERR // check and clear any existing errors
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaMemcpy(h_Ax_vector, d_Ax_vector,dim*sizeof(float), cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
cudaFree(d_data);
cudaFree(d_indices);
cudaFree(d_ptr);
cudaFree(d_perm);
cudaFree(d_nzcnt);
cudaFree(d_x_vector);
cudaFree(d_Ax_vector);
if (parameters->outFile) {
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(parameters->outFile,h_Ax_vector,dim);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free (h_data);
free (h_indices);
free (h_ptr);
free (h_perm);
free (h_nzcnt);
free (h_Ax_vector);
free (h_x_vector);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
return 0;
}
|
a8fe67d1699506a80d2a25dcd526fcc72c4b1fbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Alejandro Salmon Felix Diaz
// A01201954
//include the path to your own cuda_runtime
#include "../cuda_runtime.h"
#include "handle_error.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
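// Naive matrix multiply: each thread computes one element (row, col) of the a_row x b_col result matrix.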
__global__ void mat_mult(float* dmat_res, float* dmat_a, float* dmat_b, int a_row, int a_col, int b_col){
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(col<b_col && row<a_row){
float result = 0;
for(int i=0; i<a_col; i++){
result += dmat_a[row*a_col+i] * dmat_b[i*b_col+col];
}
dmat_res[row*b_col+col] = result;
}
}
//print a matrix stored in row-major order
void print_mat(float* mat, int row, int col){
for (int i = 0; i < row; i++){
for (int j = 0; j < col; j++){
printf("%.1f\t", mat[i*col+j]);
}
printf("\n");
}
printf("\n");
}
//fill the matrices the same way as in the lab test cases
void fill_matab(float* mata, int rowa, int cola, float* matb, int rowb, int colb){
int c = 1;
for(int i = 0; i<rowa; i++){
for(int j = 0; j<cola; j++){
mata[i*cola+j] = c++%10;
}
}
c--;
for(int i = 0; i<cola; i++){
for(int j = 0; j<colb; j++){
matb[i*colb+j] = c++%10;
}
}
}
int main(int argc, char* argv[]){
float *hmat_a, *hmat_b, *hmat_res;
float *dmat_a, *dmat_b, *dmat_res;
if (argc != 5) {
printf("usage: %s [MatrixA Rows] [MatrixA Columns] [MatrixB Rows] [MatrixB Columns]\n", argv[0]);
return -1;
}
 //not really used
int a_row = atoi(argv[1]);
int a_col = atoi(argv[2]);
 /*
 b_row is never actually used, but there is memory to spare, that's why
 DISCLAIMER:
 (if this unnecessary use of memory bothers the reader, please comment out the following line)
 */
int b_row = atoi(argv[3]);
int b_col = atoi(argv[4]);
if(a_col != atoi(argv[3])){
printf("Matrix dimensions are not correct\n");
return -1;
}
 //generate the matrices for the matrix product
hmat_a = (float *)malloc(sizeof(float)*a_row*a_col);
hmat_b = (float *)malloc(sizeof(float)*a_col*b_col);
 hmat_res = (float *)malloc(sizeof(float)*a_row*b_col); // result matrix is a_row x b_col
fill_matab(hmat_a, a_row, a_col, hmat_b, a_col, b_col);
print_mat(hmat_a, a_row, a_col);
print_mat(hmat_b, a_col, b_col);
HANDLE_ERROR(hipMalloc((void**)&dmat_a,sizeof(float)*a_row*a_col));
HANDLE_ERROR(hipMalloc((void**)&dmat_b,sizeof(float)*b_row*b_col));
 HANDLE_ERROR(hipMalloc((void**)&dmat_res,sizeof(float)*a_row*b_col));
hipMemcpy(dmat_a, hmat_a, sizeof(float)*a_row*a_col, hipMemcpyHostToDevice);
hipMemcpy(dmat_b, hmat_b, sizeof(float)*a_col*b_col, hipMemcpyHostToDevice);
 // Formulas found online to compute good block and grid sizes depending on the matrix dimensions (ceiling division per dimension)
dim3 dimGrid((b_col + BLOCK_SIZE - 1) / BLOCK_SIZE, (a_row + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
 //launch the GPU kernel
hipLaunchKernelGGL(( mat_mult), dim3(dimGrid), dim3(dimBlock), 0, 0, dmat_res, dmat_a, dmat_b, a_row, a_col, b_col);
 //copy the result matrix back to the host
 hipMemcpy(hmat_res, dmat_res, sizeof(float)*a_row*b_col, hipMemcpyDeviceToHost);
 //print the result
 print_mat(hmat_res, a_row, b_col);
 //free host memory
free(hmat_a);
free(hmat_b);
free(hmat_res);
 //free device memory
hipFree(dmat_a);
hipFree(dmat_b);
hipFree(dmat_res);
} | a8fe67d1699506a80d2a25dcd526fcc72c4b1fbf.cu | // Alejandro Salmon Felix Diaz
// A01201954
//include the path to your own cuda_runtime
#include "../cuda_runtime.h"
#include "handle_error.h"
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
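// Naive matrix multiply: each thread computes one element (row, col) of the a_row x b_col result matrix.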
__global__ void mat_mult(float* dmat_res, float* dmat_a, float* dmat_b, int a_row, int a_col, int b_col){
int col = threadIdx.x + blockIdx.x * blockDim.x;
int row = threadIdx.y + blockIdx.y * blockDim.y;
if(col<b_col && row<a_row){
float result = 0;
for(int i=0; i<a_col; i++){
result += dmat_a[row*a_col+i] * dmat_b[i*b_col+col];
}
dmat_res[row*b_col+col] = result;
}
}
//print a matrix stored in row-major order
void print_mat(float* mat, int row, int col){
for (int i = 0; i < row; i++){
for (int j = 0; j < col; j++){
printf("%.1f\t", mat[i*col+j]);
}
printf("\n");
}
printf("\n");
}
//fill the matrices the same way as in the lab test cases
void fill_matab(float* mata, int rowa, int cola, float* matb, int rowb, int colb){
int c = 1;
for(int i = 0; i<rowa; i++){
for(int j = 0; j<cola; j++){
mata[i*cola+j] = c++%10;
}
}
c--;
for(int i = 0; i<cola; i++){
for(int j = 0; j<colb; j++){
matb[i*colb+j] = c++%10;
}
}
}
int main(int argc, char* argv[]){
float *hmat_a, *hmat_b, *hmat_res;
float *dmat_a, *dmat_b, *dmat_res;
if (argc != 5) {
printf("usage: %s [MatrixA Rows] [MatrixA Columns] [MatrixB Rows] [MatrixB Columns]\n", argv[0]);
return -1;
}
 //not really used
int a_row = atoi(argv[1]);
int a_col = atoi(argv[2]);
 /*
 b_row is never actually used, but there is memory to spare, that's why
 DISCLAIMER:
 (if this unnecessary use of memory bothers the reader, please comment out the following line)
 */
int b_row = atoi(argv[3]);
int b_col = atoi(argv[4]);
if(a_col != atoi(argv[3])){
printf("Matrix dimensions are not correct\n");
return -1;
}
 //generate the matrices for the matrix product
hmat_a = (float *)malloc(sizeof(float)*a_row*a_col);
hmat_b = (float *)malloc(sizeof(float)*a_col*b_col);
 hmat_res = (float *)malloc(sizeof(float)*a_row*b_col); // result matrix is a_row x b_col
fill_matab(hmat_a, a_row, a_col, hmat_b, a_col, b_col);
print_mat(hmat_a, a_row, a_col);
print_mat(hmat_b, a_col, b_col);
HANDLE_ERROR(cudaMalloc((void**)&dmat_a,sizeof(float)*a_row*a_col));
HANDLE_ERROR(cudaMalloc((void**)&dmat_b,sizeof(float)*b_row*b_col));
 HANDLE_ERROR(cudaMalloc((void**)&dmat_res,sizeof(float)*a_row*b_col));
cudaMemcpy(dmat_a, hmat_a, sizeof(float)*a_row*a_col, cudaMemcpyHostToDevice);
cudaMemcpy(dmat_b, hmat_b, sizeof(float)*a_col*b_col, cudaMemcpyHostToDevice);
 // Formulas found online to compute good block and grid sizes depending on the matrix dimensions (ceiling division per dimension)
dim3 dimGrid((b_col + BLOCK_SIZE - 1) / BLOCK_SIZE, (a_row + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
 //launch the GPU kernel
mat_mult<<<dimGrid, dimBlock>>>(dmat_res, dmat_a, dmat_b, a_row, a_col, b_col);
 //copy the result matrix back to the host
 cudaMemcpy(hmat_res, dmat_res, sizeof(float)*a_row*b_col, cudaMemcpyDeviceToHost);
 //print the result
 print_mat(hmat_res, a_row, b_col);
 //free host memory
free(hmat_a);
free(hmat_b);
free(hmat_res);
 //free device memory
cudaFree(dmat_a);
cudaFree(dmat_b);
cudaFree(dmat_res);
} |
4d40ce316b82fdc435d4d2a2e9a0048b21a28922.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "ext_cuda_chunk.hpp"
#include "kernels/solver_methods.cuknl"
/*
* SHARED SOLVER METHODS
*/
// Entry point to copy U.
extern "C"
void ext_solver_copy_u_(
const int* chunk)
{
Chunks[*chunk-1]->CopyU();
}
// Entry point for calculating residual.
extern "C"
void ext_calculate_residual_(
const int* chunk)
{
Chunks[*chunk-1]->CalculateResidual();
}
// Entry point for calculating 2norm.
extern "C"
void ext_calculate_2norm_(
const int* chunk,
const int* normArray,
double* normOut)
{
Chunks[*chunk-1]->Calculate2Norm(*normArray, normOut);
}
// Entry point for finalising solution.
extern "C"
void ext_solver_finalise_(
const int* chunk)
{
Chunks[*chunk-1]->Finalise();
}
// Determines the rx, ry and rz values.
void TeaLeafCudaChunk::CalcRxRyRz(
const double dt,
double* rxOut,
double* ryOut,
double* rzOut)
{
double dx, dy, dz;
hipMemcpy(&dx, dCellDx, sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&dy, dCellDy, sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(&dz, dCellDz, sizeof(double), hipMemcpyDeviceToHost);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
*rxOut = dt/(dx*dx);
*ryOut = dt/(dy*dy);
*rzOut = dt/(dz*dz);
}
// Copies the current value of u
void TeaLeafCudaChunk::CopyU()
{
PRE_KERNEL(2*HALO_PAD);
hipLaunchKernelGGL(( CuKnlCopyU), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
innerX, innerY, innerZ, xCells, xCells*yCells, dU, dU0);
POST_KERNEL("Copy U");
}
// Calculates the current residual value.
void TeaLeafCudaChunk::CalculateResidual()
{
PRE_KERNEL(2*HALO_PAD);
hipLaunchKernelGGL(( CuKnlCalculateResidual), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
innerX, innerY, innerZ, xCells, xCells*yCells,
dU, dU0, dKx, dKy, dKz, dR);
POST_KERNEL("Calculate Residual");
}
// Calculates the 2norm of a particular space.
void TeaLeafCudaChunk::Calculate2Norm(
const bool normArray,
double* normOut)
{
PRE_KERNEL(2*HALO_PAD);
hipLaunchKernelGGL(( CuKnlCalculate2Norm), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
innerX, innerY, innerZ, xCells, xCells*yCells,
normArray ? dR : dU0, dReduceBuffer1);
POST_KERNEL("Calculate 2Norm");
SumReduce(dReduceBuffer1, normOut, numBlocks, "2norm reduction");
}
// Reduces residual values of a buffer
void TeaLeafCudaChunk::SumReduce(
double* buffer,
double* result,
int len,
std::string kName)
{
while(len > 1)
{
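        // each pass reduces the 'len' values block-by-block, leaving one partial result per block;
        // iterate until a single value remains, then copy it back to the host below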
int numBlocks = ceil(len/(float)BLOCK_SIZE);
START_PROFILING();
hipLaunchKernelGGL(( CuKnlSumReduce), dim3(numBlocks),dim3(BLOCK_SIZE), 0, 0, len, buffer);
POST_KERNEL(kName);
len = numBlocks;
}
hipMemcpy(result, buffer, sizeof(double), hipMemcpyDeviceToHost);
CheckErrors(__LINE__,__FILE__);
}
// Finalises the solution.
void TeaLeafCudaChunk::Finalise()
{
PRE_KERNEL(2*HALO_PAD);
hipLaunchKernelGGL(( CuKnlFinalise), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0,
innerX, innerY, innerZ, xCells, xCells*yCells, dDensity, dU, dEnergy1);
POST_KERNEL("Finalise Solver");
}
// Loads alphas and betas onto the device
void TeaLeafCudaChunk::LoadAlphaBeta(
const double* alphas,
const double* betas,
const int numCoefs)
{
size_t length = numCoefs*sizeof(double);
hipMalloc((void**) &dAlphas, length);
hipMalloc((void**) &dBetas, length);
hipMemcpy(dAlphas, alphas, length, hipMemcpyHostToDevice);
hipMemcpy(dBetas, betas, length, hipMemcpyHostToDevice);
hipDeviceSynchronize();
CheckErrors(__LINE__,__FILE__);
}
| 4d40ce316b82fdc435d4d2a2e9a0048b21a28922.cu | #include <cstdio>
#include "ext_cuda_chunk.hpp"
#include "kernels/solver_methods.cuknl"
/*
* SHARED SOLVER METHODS
*/
// Entry point to copy U.
extern "C"
void ext_solver_copy_u_(
const int* chunk)
{
Chunks[*chunk-1]->CopyU();
}
// Entry point for calculating residual.
extern "C"
void ext_calculate_residual_(
const int* chunk)
{
Chunks[*chunk-1]->CalculateResidual();
}
// Entry point for calculating 2norm.
extern "C"
void ext_calculate_2norm_(
const int* chunk,
const int* normArray,
double* normOut)
{
Chunks[*chunk-1]->Calculate2Norm(*normArray, normOut);
}
// Entry point for finalising solution.
extern "C"
void ext_solver_finalise_(
const int* chunk)
{
Chunks[*chunk-1]->Finalise();
}
// Determines the rx, ry and rz values.
void TeaLeafCudaChunk::CalcRxRyRz(
const double dt,
double* rxOut,
double* ryOut,
double* rzOut)
{
double dx, dy, dz;
cudaMemcpy(&dx, dCellDx, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&dy, dCellDy, sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(&dz, dCellDz, sizeof(double), cudaMemcpyDeviceToHost);
TeaLeafCudaChunk::CheckErrors(__LINE__,__FILE__);
*rxOut = dt/(dx*dx);
*ryOut = dt/(dy*dy);
*rzOut = dt/(dz*dz);
}
// Copies the current value of u
void TeaLeafCudaChunk::CopyU()
{
PRE_KERNEL(2*HALO_PAD);
CuKnlCopyU<<<numBlocks, BLOCK_SIZE>>>(
innerX, innerY, innerZ, xCells, xCells*yCells, dU, dU0);
POST_KERNEL("Copy U");
}
// Calculates the current residual value.
void TeaLeafCudaChunk::CalculateResidual()
{
PRE_KERNEL(2*HALO_PAD);
CuKnlCalculateResidual<<<numBlocks, BLOCK_SIZE>>>(
innerX, innerY, innerZ, xCells, xCells*yCells,
dU, dU0, dKx, dKy, dKz, dR);
POST_KERNEL("Calculate Residual");
}
// Calculates the 2norm of a particular space.
void TeaLeafCudaChunk::Calculate2Norm(
const bool normArray,
double* normOut)
{
PRE_KERNEL(2*HALO_PAD);
CuKnlCalculate2Norm<<<numBlocks, BLOCK_SIZE>>>(
innerX, innerY, innerZ, xCells, xCells*yCells,
normArray ? dR : dU0, dReduceBuffer1);
POST_KERNEL("Calculate 2Norm");
SumReduce(dReduceBuffer1, normOut, numBlocks, "2norm reduction");
}
// Reduces residual values of a buffer
void TeaLeafCudaChunk::SumReduce(
double* buffer,
double* result,
int len,
std::string kName)
{
while(len > 1)
{
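        // each pass reduces the 'len' values block-by-block, leaving one partial result per block;
        // iterate until a single value remains, then copy it back to the host below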
int numBlocks = ceil(len/(float)BLOCK_SIZE);
START_PROFILING();
CuKnlSumReduce<<<numBlocks,BLOCK_SIZE>>>(len, buffer);
POST_KERNEL(kName);
len = numBlocks;
}
cudaMemcpy(result, buffer, sizeof(double), cudaMemcpyDeviceToHost);
CheckErrors(__LINE__,__FILE__);
}
// Finalises the solution.
void TeaLeafCudaChunk::Finalise()
{
PRE_KERNEL(2*HALO_PAD);
CuKnlFinalise<<<numBlocks, BLOCK_SIZE>>>(
innerX, innerY, innerZ, xCells, xCells*yCells, dDensity, dU, dEnergy1);
POST_KERNEL("Finalise Solver");
}
// Loads alphas and betas onto the device
void TeaLeafCudaChunk::LoadAlphaBeta(
const double* alphas,
const double* betas,
const int numCoefs)
{
size_t length = numCoefs*sizeof(double);
cudaMalloc((void**) &dAlphas, length);
cudaMalloc((void**) &dBetas, length);
cudaMemcpy(dAlphas, alphas, length, cudaMemcpyHostToDevice);
cudaMemcpy(dBetas, betas, length, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
CheckErrors(__LINE__,__FILE__);
}
|
b74c26bcebe3b77511eddacec920e9963e39fd60.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fill_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE*sizeof(float)); // allocation size in bytes, not element count
int INCX = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
fill_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
hipDeviceSynchronize();
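// warm-up: 10 untimed launches so that the 1000 timed launches below exclude one-time setup costs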
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
fill_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
fill_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, N,ALPHA,X,INCX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b74c26bcebe3b77511eddacec920e9963e39fd60.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fill_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
float ALPHA = 2;
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE*sizeof(float)); // allocation size in bytes, not element count
int INCX = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
fill_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
cudaDeviceSynchronize();
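// warm-up: 10 untimed launches so that the 1000 timed launches below exclude one-time setup costs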
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
fill_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
fill_kernel<<<gridBlock,threadBlock>>>(N,ALPHA,X,INCX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4bd195d6d1787918402f1ab7001e9c9d7a46bffe.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "datadef.h"
#include "warp_device.cuh"
#include "check_cuda.h"
__global__ void pop_fission_kernel(unsigned N, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_scanned, spatial_data* fission_particles, float* fission_energy){
// get tid
int tid = threadIdx.x+blockIdx.x*blockDim.x;
// declare shared variables
__shared__ unsigned n_isotopes;
//__shared__ unsigned energy_grid_len;
__shared__ unsigned total_reaction_channels;
__shared__ float* energy_grid;
__shared__ dist_container* dist_scatter;
__shared__ dist_container* dist_energy;
__shared__ spatial_data* space;
__shared__ float* E;
__shared__ unsigned* rn_bank;
__shared__ unsigned* yield;
__shared__ unsigned* index;
__shared__ unsigned* isonum;
// have thread 0 of block copy all pointers and static info into shared memory
if (threadIdx.x == 0){
n_isotopes = d_xsdata[0].n_isotopes;
//energy_grid_len = d_xsdata[0].energy_grid_len;
total_reaction_channels = d_xsdata[0].total_reaction_channels;
energy_grid = d_xsdata[0].energy_grid;
dist_scatter = d_xsdata[0].dist_scatter;
dist_energy = d_xsdata[0].dist_energy;
space = d_particles[0].space;
E = d_particles[0].E;
rn_bank = d_particles[0].rn_bank;
yield = d_particles[0].yield;
index = d_particles[0].index;
isonum = d_particles[0].isonum;
}
	// make sure shared loads happen before anything else (especially returns)
__syncthreads();
// load history data
unsigned this_dex = index[ tid];
float this_E = E[ tid];
unsigned this_yield = yield[ tid];
unsigned rn = rn_bank[ tid];
float this_x = space[ tid].x;
float this_y = space[ tid].y;
float this_z = space[ tid].z;
unsigned this_tope = isonum[ tid];
// get array position from prefix scan
unsigned position = d_scanned[tid];
// make sure individual loads happen before anything else?
__syncthreads();
// return immediately if out of bounds
if (tid >= N){return;}
// check yield
if (this_yield==0){
return;
}
// another yield check
if((d_scanned[tid+1]-d_scanned[tid]) == 0){
printf("NOT RIGHT! \n");
return;
}
// check E data pointers
if(dist_energy == 0x0){
printf("null pointer, energy array in pop_fission!,tid %u\n",tid);
return;
}
// check S data pointers
if(dist_scatter == 0x0){
printf("null pointer, scatter array in pop_fission!,tid %u\n",tid);
return;
}
// check second level pointers
if(dist_scatter[this_dex].lower == 0x0){printf("pop_fission: null pointer dist_scatter.lower! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_scatter[this_dex].upper == 0x0){printf("pop_fission: null pointer dist_scatter.upper! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_energy[ this_dex].upper == 0x0){printf("pop_fission: null pointer dist_energy.upper! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_energy[ this_dex].lower == 0x0){printf("pop_fission: null pointer dist_energy.lower! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
//constants
const float pi = 3.14159265359;
// internal kernel variables
float nu_t0 = 0.0;
float nu_t1 = 0.0;
float nu_d0 = 0.0;
float nu_d1 = 0.0;
float beta = 0.0;
float e0 = 0.0;
float e1 = 0.0;
unsigned data_dex = 0;
float sampled_E = 0.0;
float phi, mu, E0, f, rn1, rn2;
unsigned this_law, this_len, this_intt, upper_len, lower_len, pre_index, pre_position;
float *this_var, *this_cdf, *this_pdf, *upper_var, *lower_var;
unsigned n_columns = n_isotopes + total_reaction_channels;
unsigned this_col = this_dex % n_columns;
unsigned this_row = (this_dex-this_col) / n_columns;
float E_of_index0 = energy_grid[this_row];
float E_of_index1 = energy_grid[this_row+1];
if(this_E < E_of_index0 | this_E > E_of_index1){printf("energy outside of distributions in pop!!!! this %6.4E row %6.4E row+1 %6.4E \n",this_E,E_of_index0,E_of_index1);}
// load dist info
dist_data this_edist, this_sdist;
dist_data sdist_lower = dist_scatter[this_dex].lower[0];
dist_data sdist_upper = dist_scatter[this_dex].upper[0];
dist_data edist_lower = dist_energy[ this_dex].lower[0];
dist_data edist_upper = dist_energy[ this_dex].upper[0];
// copy nu values, energy points from dist, t is len, d is law
memcpy(&nu_t0 , &sdist_lower.len, 1*sizeof(float));
memcpy(&nu_t1 , &sdist_upper.len, 1*sizeof(float));
memcpy(&nu_d0 , &sdist_lower.law, 1*sizeof(float));
memcpy(&nu_d1 , &sdist_upper.law, 1*sizeof(float));
memcpy(&e0 , &sdist_lower.erg, 1*sizeof(float));
memcpy(&e1 , &sdist_upper.erg, 1*sizeof(float));
// get interpolated beta value, beta = nu_d / nu_t
beta = interpolate_linear_energy( this_E, e0, e1, nu_d0, nu_d1 ) /
interpolate_linear_energy( this_E, e0, e1, nu_t0, nu_t1 ) ;
if( (this_E > e1 | this_E < e0) & (e0 != e1) ){printf("OUTSIDE bounds in pop_fission! this_E %6.4E e0 %6.4E e1 %6.4E col %u row %u\n",this_E,e0,e1,this_col,this_row);}
// write new histories for this yield number
for(unsigned k=0 ; k < this_yield ; k++ ){
//get proper data index
data_dex = position+k;
// check if this neutron is delayed or prompt
if ( get_rand(&rn) > beta ){
// do individual stochastic mixing for this prompt neutron
// pick upper or lower edist via stochastic mixing
f = (this_E - edist_lower.erg) / (edist_upper.erg - edist_lower.erg);
if( get_rand(&rn) > f ){
this_edist = edist_lower;
}
else{
this_edist = edist_upper;
}
// set pointers and parameters
this_law = this_edist.law;
this_len = this_edist.len;
this_intt = this_edist.intt;
this_var = this_edist.var;
this_cdf = this_edist.cdf;
this_pdf = this_edist.pdf;
upper_var = edist_upper.var;
lower_var = edist_lower.var;
upper_len = edist_upper.len;
lower_len = edist_lower.len;
}
else{
// pick upper or lower sdist (contains the delayed data) via stochastic mixing
f = 0.0;//(this_E - sdist_lower.var[0]) / (sdist_upper.erg - sdist_lower.erg);
if( get_rand(&rn) > f ){
this_sdist = sdist_lower;
}
else{
this_sdist = sdist_upper;
}
// decode precursor intt, 100 place
this_intt = (this_sdist.intt%1000-this_sdist.intt%100)/100;
// decode precursor law, 1000 place
this_law = (this_sdist.intt%10000-this_sdist.intt%1000)/1000;
// sample which precursor neutron is from
rn1 = get_rand(&rn);
for( pre_index=0; pre_index<6; pre_index++ ){
if ( rn1 <= this_sdist.var[pre_index+1] ){
break;
}
}
// get position of data in vector and vector length
pre_position = (unsigned) this_sdist.pdf[pre_index]; // haha preposition...
this_len = (unsigned) this_sdist.pdf[pre_index+1] - (unsigned) this_sdist.pdf[pre_index];
// get pointers to sampled data
this_var = &this_sdist.cdf[pre_position];
			this_cdf = &this_sdist.cdf[pre_position + ((unsigned)this_sdist.pdf[6]) ]; // last value in cdf is the total length of the combined 6-vectors
this_pdf = &this_sdist.cdf[pre_position + ((unsigned)this_sdist.pdf[6])*2 ];
upper_var = &this_sdist.cdf[pre_position];
lower_var = &this_sdist.cdf[pre_position];
upper_len = this_len;
lower_len = this_len;
}
// check for null again
if( this_var == 0x0){printf("!-!-! null pointer this_var!\n"); return;}
if( this_cdf == 0x0){printf("!-!-! null pointer this_cdf!\n"); return;}
if( this_pdf == 0x0){printf("!-!-! null pointer this_pdf!\n"); return;}
if(upper_var == 0x0){printf("!-!-! null pointer upper_var!\n"); return;}
if(lower_var == 0x0){printf("!-!-! null pointer lower_var!\n"); return;}
// sample dist, passing the parameters/pointers of the sampled delayed/prompt emission data
if (this_law ==4 ){
// sample continuous tabular
E0 = sample_continuous_tablular( this_len ,
this_intt ,
get_rand(&rn) ,
this_var ,
this_cdf,
this_pdf );
//scale it to bins
sampled_E = scale_to_bins( f, E0,
this_var[0], this_var[ this_len-1],
lower_var[0], lower_var[lower_len-1],
upper_var[0], upper_var[upper_len-1] );
// check errors
if (!isfinite(sampled_E) | sampled_E<=0.0){
printf("Fission pop mis-sampled tid %i data_dex %u E %6.4E... setting to 2.5\n",tid,data_dex,sampled_E);
sampled_E = 2.5;
}
// sample mu/phi isotropically
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else if ( this_law == 7 ){ // maxwell spectrum
// get tabulated temperature
float t0 = edist_lower.var[0];
float t1 = edist_upper.var[0];
float U = edist_lower.cdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float T = 0;
sampled_E = 99999.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
T = t0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
T = (t1 - t0)/(e1 - e0) * (this_E - e0) + t0;
}
else{
printf("dont know what to do!\n");
}
// restriction
while (sampled_E > this_E - U){
// rejection sample
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
while ( rn1*rn1+rn2*rn2 > 1.0 ) {
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
}
// mcnp5 volIII pg 2-43
sampled_E = -T * ( rn1*rn1*logf(get_rand(&rn)) / (rn1*rn1+rn2*rn2) + logf(get_rand(&rn)) );
}
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
		else if ( this_law == 9 ){ //evaporation spectrum
// get tabulated temperature
float t0 = edist_lower.var[0];
float t1 = edist_upper.var[0];
float U = edist_lower.cdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float T = 0.0;
float m = 0.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
T = t0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
T = (t1 - t0)/(e1 - e0) * (this_E - e0) + t0;
}
else{
printf("dont know what to do!\n");
}
// rejection sample
m = (this_E - U)/T;
e0 = 1.0-expf(-m);
float x = -logf(1.0-e0*get_rand(&rn)) - logf(1.0-e0*get_rand(&rn));
while ( x>m ) {
x = -logf(1.0-e0*get_rand(&rn)) - logf(1.0-e0*get_rand(&rn));
}
// mcnp5 volIII pg 2-43
sampled_E = T * x;
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else if( this_law == 11 ){ // energy-dependent maxwellian
// get tabulated parameters
float a0 = edist_lower.var[0];
float a1 = edist_upper.var[0];
float b0 = edist_lower.cdf[0];
float b1 = edist_upper.cdf[0];
float U = edist_lower.pdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float a = 0.0;
float b = 0.0;
float g = 0.0;
float c = 0.0;
sampled_E = 99999.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
a = a0;
b = b0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
a = (a1 - a0)/(e1 - e0) * (this_E - e0) + a0;
b = (b1 - b0)/(e1 - e0) * (this_E - e0) + b0;
c = 1.0 + a*b/8.0;
g = sqrtf( c*c - 1.0 ) + c;
}
else{
printf("dont know what to do!\n");
}
// restriction
while (sampled_E > this_E - U){
// rejection sample
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
sampled_E = -a*g*logf(rn1);
c = (1.0-g)*(1.0-logf(rn1)) - logf(rn2);
while ( c*c > b*sampled_E ) {
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
sampled_E = -a*g*logf(rn1);
c = (1.0-g)*(1.0-logf(rn1)) - logf(rn2);
}
}
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else{
printf("LAW %u NOT HANDLED IN FISSION POP!\n",this_law);
}
// check temp array
if(fission_energy[ data_dex] != 0.0){printf("NONZERO fission_energy[ data_dex] = % 6.4E \n",fission_energy[ data_dex] );}
if(fission_particles[data_dex].x != 0.0){printf("NONZERO fission_particles[data_dex].x = % 6.4E \n",fission_particles[data_dex].x );}
if(fission_particles[data_dex].y != 0.0){printf("NONZERO fission_particles[data_dex].y = % 6.4E \n",fission_particles[data_dex].y );}
if(fission_particles[data_dex].z != 0.0){printf("NONZERO fission_particles[data_dex].z = % 6.4E \n",fission_particles[data_dex].z );}
if(fission_particles[data_dex].xhat != 0.0){printf("NONZERO fission_particles[data_dex].xhat = % 6.4E \n",fission_particles[data_dex].xhat );}
if(fission_particles[data_dex].yhat != 0.0){printf("NONZERO fission_particles[data_dex].yhat = % 6.4E \n",fission_particles[data_dex].yhat );}
if(fission_particles[data_dex].zhat != 0.0){printf("NONZERO fission_particles[data_dex].zhat = % 6.4E \n",fission_particles[data_dex].zhat );}
if(fission_particles[data_dex].surf_dist != 0.0){printf("NONZERO fission_particles[data_dex].surf_dist = % 6.4E \n",fission_particles[data_dex].surf_dist );}
if(fission_particles[data_dex].enforce_BC != 0 ){printf("NONZERO fission_particles[data_dex].enforce_BC = %u \n",fission_particles[data_dex].enforce_BC );}
if(fission_particles[data_dex].norm[0] != 0.0){printf("NONZERO fission_particles[data_dex].norm[0] = % 6.4E \n",fission_particles[data_dex].norm[0] );}
if(fission_particles[data_dex].norm[1] != 0.0){printf("NONZERO fission_particles[data_dex].norm[1] = % 6.4E \n",fission_particles[data_dex].norm[1] );}
if(fission_particles[data_dex].norm[2] != 0.0){printf("NONZERO fission_particles[data_dex].norm[2] = % 6.4E \n",fission_particles[data_dex].norm[2] );}
// set data in temp array since GRID-WISE threadsync cannot be done (easily?)!
fission_energy[ data_dex ] = sampled_E;
fission_particles[ data_dex ].x = this_x;
fission_particles[ data_dex ].y = this_y;
fission_particles[ data_dex ].z = this_z;
fission_particles[ data_dex ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
fission_particles[ data_dex ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
fission_particles[ data_dex ].zhat = mu;
fission_particles[ data_dex ].enforce_BC = 0;
fission_particles[ data_dex ].surf_dist = 999999.0;
//if(data_dex<=9){printf("array index %u, E = % 6.4E d_fissile_energy[ data_dex ] = % 6.4E\n",data_dex,sampled_E,E[ data_dex ]);}
}
// write current seed out
rn_bank[tid] = rn;
}
/**
 * \brief Launch the fission "pop" kernel that creates new neutrons at fission sites.
 * \details For every history with a nonzero yield, the kernel samples an emission energy
 *          (laws 4, 7, 9 and 11) and an isotropic direction, and writes the new particles into
 *          the intermediate fission_particles / fission_energy buffers at offsets given by the
 *          scanned yield array.
*
* @param[in] NUM_THREADS - the number of threads to run per thread block
* @param[in] N - the total number of threads to launch on the grid
* @param[in] d_xsdata - device pointer to cross section data pointer array
* @param[in] d_particles - device pointer to particle data pointer array
* @param[in] d_scanned - device pointer to array of the cumulative sum (scan) of the yield array, used to find final index where new particles will be written
 * @param[in] fission_particles - device pointer to intermediate spatial data array where popped values will be written
 * @param[in] fission_energy - device pointer to intermediate energy data array where popped values will be written
*/
void pop_fission( unsigned NUM_THREADS, unsigned N, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_scanned, spatial_data* fission_particles, float* fission_energy ){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
hipLaunchKernelGGL(( pop_fission_kernel) , dim3(blks), dim3(NUM_THREADS) , 0, 0, N, d_xsdata, d_particles, d_scanned, fission_particles, fission_energy);
check_cuda(hipDeviceSynchronize());
}
| 4bd195d6d1787918402f1ab7001e9c9d7a46bffe.cu | #include <cuda.h>
#include <stdio.h>
#include "datadef.h"
#include "warp_device.cuh"
#include "check_cuda.h"
__global__ void pop_fission_kernel(unsigned N, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_scanned, spatial_data* fission_particles, float* fission_energy){
// get tid
int tid = threadIdx.x+blockIdx.x*blockDim.x;
// declare shared variables
__shared__ unsigned n_isotopes;
//__shared__ unsigned energy_grid_len;
__shared__ unsigned total_reaction_channels;
__shared__ float* energy_grid;
__shared__ dist_container* dist_scatter;
__shared__ dist_container* dist_energy;
__shared__ spatial_data* space;
__shared__ float* E;
__shared__ unsigned* rn_bank;
__shared__ unsigned* yield;
__shared__ unsigned* index;
__shared__ unsigned* isonum;
// have thread 0 of block copy all pointers and static info into shared memory
if (threadIdx.x == 0){
n_isotopes = d_xsdata[0].n_isotopes;
//energy_grid_len = d_xsdata[0].energy_grid_len;
total_reaction_channels = d_xsdata[0].total_reaction_channels;
energy_grid = d_xsdata[0].energy_grid;
dist_scatter = d_xsdata[0].dist_scatter;
dist_energy = d_xsdata[0].dist_energy;
space = d_particles[0].space;
E = d_particles[0].E;
rn_bank = d_particles[0].rn_bank;
yield = d_particles[0].yield;
index = d_particles[0].index;
isonum = d_particles[0].isonum;
}
	// make sure shared loads happen before anything else (especially returns)
__syncthreads();
// load history data
unsigned this_dex = index[ tid];
float this_E = E[ tid];
unsigned this_yield = yield[ tid];
unsigned rn = rn_bank[ tid];
float this_x = space[ tid].x;
float this_y = space[ tid].y;
float this_z = space[ tid].z;
unsigned this_tope = isonum[ tid];
// get array position from prefix scan
unsigned position = d_scanned[tid];
// make sure individual loads happen before anything else?
__syncthreads();
// return immediately if out of bounds
if (tid >= N){return;}
// check yield
if (this_yield==0){
return;
}
// another yield check
if((d_scanned[tid+1]-d_scanned[tid]) == 0){
printf("NOT RIGHT! \n");
return;
}
// check E data pointers
if(dist_energy == 0x0){
printf("null pointer, energy array in pop_fission!,tid %u\n",tid);
return;
}
// check S data pointers
if(dist_scatter == 0x0){
printf("null pointer, scatter array in pop_fission!,tid %u\n",tid);
return;
}
// check second level pointers
if(dist_scatter[this_dex].lower == 0x0){printf("pop_fission: null pointer dist_scatter.lower! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_scatter[this_dex].upper == 0x0){printf("pop_fission: null pointer dist_scatter.upper! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_energy[ this_dex].upper == 0x0){printf("pop_fission: null pointer dist_energy.upper! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
if(dist_energy[ this_dex].lower == 0x0){printf("pop_fission: null pointer dist_energy.lower! this_dex %u this_E %6.4E tope %u yield %u\n",this_dex,this_E,this_tope,this_yield); return;}
//constants
const float pi = 3.14159265359;
// internal kernel variables
float nu_t0 = 0.0;
float nu_t1 = 0.0;
float nu_d0 = 0.0;
float nu_d1 = 0.0;
float beta = 0.0;
float e0 = 0.0;
float e1 = 0.0;
unsigned data_dex = 0;
float sampled_E = 0.0;
float phi, mu, E0, f, rn1, rn2;
unsigned this_law, this_len, this_intt, upper_len, lower_len, pre_index, pre_position;
float *this_var, *this_cdf, *this_pdf, *upper_var, *lower_var;
unsigned n_columns = n_isotopes + total_reaction_channels;
unsigned this_col = this_dex % n_columns;
unsigned this_row = (this_dex-this_col) / n_columns;
float E_of_index0 = energy_grid[this_row];
float E_of_index1 = energy_grid[this_row+1];
if(this_E < E_of_index0 | this_E > E_of_index1){printf("energy outside of distributions in pop!!!! this %6.4E row %6.4E row+1 %6.4E \n",this_E,E_of_index0,E_of_index1);}
// load dist info
dist_data this_edist, this_sdist;
dist_data sdist_lower = dist_scatter[this_dex].lower[0];
dist_data sdist_upper = dist_scatter[this_dex].upper[0];
dist_data edist_lower = dist_energy[ this_dex].lower[0];
dist_data edist_upper = dist_energy[ this_dex].upper[0];
// copy nu values, energy points from dist, t is len, d is law
memcpy(&nu_t0 , &sdist_lower.len, 1*sizeof(float));
memcpy(&nu_t1 , &sdist_upper.len, 1*sizeof(float));
memcpy(&nu_d0 , &sdist_lower.law, 1*sizeof(float));
memcpy(&nu_d1 , &sdist_upper.law, 1*sizeof(float));
memcpy(&e0 , &sdist_lower.erg, 1*sizeof(float));
memcpy(&e1 , &sdist_upper.erg, 1*sizeof(float));
// get interpolated beta value, beta = nu_d / nu_t
beta = interpolate_linear_energy( this_E, e0, e1, nu_d0, nu_d1 ) /
interpolate_linear_energy( this_E, e0, e1, nu_t0, nu_t1 ) ;
if( (this_E > e1 | this_E < e0) & (e0 != e1) ){printf("OUTSIDE bounds in pop_fission! this_E %6.4E e0 %6.4E e1 %6.4E col %u row %u\n",this_E,e0,e1,this_col,this_row);}
// write new histories for this yield number
for(unsigned k=0 ; k < this_yield ; k++ ){
//get proper data index
data_dex = position+k;
// check if this neutron is delayed or prompt
if ( get_rand(&rn) > beta ){
// do individual stochastic mixing for this prompt neutron
// pick upper or lower edist via stochastic mixing
f = (this_E - edist_lower.erg) / (edist_upper.erg - edist_lower.erg);
if( get_rand(&rn) > f ){
this_edist = edist_lower;
}
else{
this_edist = edist_upper;
}
// set pointers and parameters
this_law = this_edist.law;
this_len = this_edist.len;
this_intt = this_edist.intt;
this_var = this_edist.var;
this_cdf = this_edist.cdf;
this_pdf = this_edist.pdf;
upper_var = edist_upper.var;
lower_var = edist_lower.var;
upper_len = edist_upper.len;
lower_len = edist_lower.len;
}
else{
// pick upper or lower sdist (contains the delayed data) via stochastic mixing
f = 0.0;//(this_E - sdist_lower.var[0]) / (sdist_upper.erg - sdist_lower.erg);
if( get_rand(&rn) > f ){
this_sdist = sdist_lower;
}
else{
this_sdist = sdist_upper;
}
// decode precursor intt, 100 place
this_intt = (this_sdist.intt%1000-this_sdist.intt%100)/100;
// decode precursor law, 1000 place
this_law = (this_sdist.intt%10000-this_sdist.intt%1000)/1000;
// sample which precursor neutron is from
rn1 = get_rand(&rn);
for( pre_index=0; pre_index<6; pre_index++ ){
if ( rn1 <= this_sdist.var[pre_index+1] ){
break;
}
}
// get position of data in vector and vector length
pre_position = (unsigned) this_sdist.pdf[pre_index]; // haha preposition...
this_len = (unsigned) this_sdist.pdf[pre_index+1] - (unsigned) this_sdist.pdf[pre_index];
// get pointers to sampled data
this_var = &this_sdist.cdf[pre_position];
			this_cdf = &this_sdist.cdf[pre_position + ((unsigned)this_sdist.pdf[6]) ]; // last value in cdf is the total length of the combined 6-vectors
this_pdf = &this_sdist.cdf[pre_position + ((unsigned)this_sdist.pdf[6])*2 ];
upper_var = &this_sdist.cdf[pre_position];
lower_var = &this_sdist.cdf[pre_position];
upper_len = this_len;
lower_len = this_len;
}
// check for null again
if( this_var == 0x0){printf("!-!-! null pointer this_var!\n"); return;}
if( this_cdf == 0x0){printf("!-!-! null pointer this_cdf!\n"); return;}
if( this_pdf == 0x0){printf("!-!-! null pointer this_pdf!\n"); return;}
if(upper_var == 0x0){printf("!-!-! null pointer upper_var!\n"); return;}
if(lower_var == 0x0){printf("!-!-! null pointer lower_var!\n"); return;}
// sample dist, passing the parameters/pointers of the sampled delayed/prompt emission data
if (this_law ==4 ){
// sample continuous tabular
E0 = sample_continuous_tablular( this_len ,
this_intt ,
get_rand(&rn) ,
this_var ,
this_cdf,
this_pdf );
//scale it to bins
sampled_E = scale_to_bins( f, E0,
this_var[0], this_var[ this_len-1],
lower_var[0], lower_var[lower_len-1],
upper_var[0], upper_var[upper_len-1] );
// check errors
if (!isfinite(sampled_E) | sampled_E<=0.0){
printf("Fission pop mis-sampled tid %i data_dex %u E %6.4E... setting to 2.5\n",tid,data_dex,sampled_E);
sampled_E = 2.5;
}
// sample mu/phi isotropically
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else if ( this_law == 7 ){ // maxwell spectrum
// get tabulated temperature
float t0 = edist_lower.var[0];
float t1 = edist_upper.var[0];
float U = edist_lower.cdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float T = 0;
sampled_E = 99999.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
T = t0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
T = (t1 - t0)/(e1 - e0) * (this_E - e0) + t0;
}
else{
printf("dont know what to do!\n");
}
// restriction
while (sampled_E > this_E - U){
// rejection sample
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
while ( rn1*rn1+rn2*rn2 > 1.0 ) {
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
}
// mcnp5 volIII pg 2-43
sampled_E = -T * ( rn1*rn1*logf(get_rand(&rn)) / (rn1*rn1+rn2*rn2) + logf(get_rand(&rn)) );
}
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
		else if ( this_law == 9 ){ //evaporation spectrum
// get tabulated temperature
float t0 = edist_lower.var[0];
float t1 = edist_upper.var[0];
float U = edist_lower.cdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float T = 0.0;
float m = 0.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
T = t0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
T = (t1 - t0)/(e1 - e0) * (this_E - e0) + t0;
}
else{
printf("dont know what to do!\n");
}
// rejection sample
m = (this_E - U)/T;
e0 = 1.0-expf(-m);
float x = -logf(1.0-e0*get_rand(&rn)) - logf(1.0-e0*get_rand(&rn));
while ( x>m ) {
x = -logf(1.0-e0*get_rand(&rn)) - logf(1.0-e0*get_rand(&rn));
}
// mcnp5 volIII pg 2-43
sampled_E = T * x;
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else if( this_law == 11 ){ // energy-dependent maxwellian
// get tabulated parameters
float a0 = edist_lower.var[0];
float a1 = edist_upper.var[0];
float b0 = edist_lower.cdf[0];
float b1 = edist_upper.cdf[0];
float U = edist_lower.pdf[0];
float e0 = edist_lower.erg;
float e1 = edist_upper.erg;
float a = 0.0;
float b = 0.0;
float g = 0.0;
float c = 0.0;
sampled_E = 99999.0;
// interpolate T
if (e1==e0 | edist_lower.intt==1){ // in top bin, both values are the same
a = a0;
b = b0;
}
else if (edist_lower.intt==2){// lin-lin interpolation
a = (a1 - a0)/(e1 - e0) * (this_E - e0) + a0;
b = (b1 - b0)/(e1 - e0) * (this_E - e0) + b0;
c = 1.0 + a*b/8.0;
g = sqrtf( c*c - 1.0 ) + c;
}
else{
printf("dont know what to do!\n");
}
// restriction
while (sampled_E > this_E - U){
// rejection sample
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
sampled_E = -a*g*logf(rn1);
c = (1.0-g)*(1.0-logf(rn1)) - logf(rn2);
while ( c*c > b*sampled_E ) {
rn1 = get_rand(&rn);
rn2 = get_rand(&rn);
sampled_E = -a*g*logf(rn1);
c = (1.0-g)*(1.0-logf(rn1)) - logf(rn2);
}
}
// isotropic mu/phi
mu = 2.0*get_rand(&rn)-1.0;
phi = 2.0*pi*get_rand(&rn);
}
else{
printf("LAW %u NOT HANDLED IN FISSION POP!\n",this_law);
}
// check temp array
if(fission_energy[ data_dex] != 0.0){printf("NONZERO fission_energy[ data_dex] = % 6.4E \n",fission_energy[ data_dex] );}
if(fission_particles[data_dex].x != 0.0){printf("NONZERO fission_particles[data_dex].x = % 6.4E \n",fission_particles[data_dex].x );}
if(fission_particles[data_dex].y != 0.0){printf("NONZERO fission_particles[data_dex].y = % 6.4E \n",fission_particles[data_dex].y );}
if(fission_particles[data_dex].z != 0.0){printf("NONZERO fission_particles[data_dex].z = % 6.4E \n",fission_particles[data_dex].z );}
if(fission_particles[data_dex].xhat != 0.0){printf("NONZERO fission_particles[data_dex].xhat = % 6.4E \n",fission_particles[data_dex].xhat );}
if(fission_particles[data_dex].yhat != 0.0){printf("NONZERO fission_particles[data_dex].yhat = % 6.4E \n",fission_particles[data_dex].yhat );}
if(fission_particles[data_dex].zhat != 0.0){printf("NONZERO fission_particles[data_dex].zhat = % 6.4E \n",fission_particles[data_dex].zhat );}
if(fission_particles[data_dex].surf_dist != 0.0){printf("NONZERO fission_particles[data_dex].surf_dist = % 6.4E \n",fission_particles[data_dex].surf_dist );}
if(fission_particles[data_dex].enforce_BC != 0 ){printf("NONZERO fission_particles[data_dex].enforce_BC = %u \n",fission_particles[data_dex].enforce_BC );}
if(fission_particles[data_dex].norm[0] != 0.0){printf("NONZERO fission_particles[data_dex].norm[0] = % 6.4E \n",fission_particles[data_dex].norm[0] );}
if(fission_particles[data_dex].norm[1] != 0.0){printf("NONZERO fission_particles[data_dex].norm[1] = % 6.4E \n",fission_particles[data_dex].norm[1] );}
if(fission_particles[data_dex].norm[2] != 0.0){printf("NONZERO fission_particles[data_dex].norm[2] = % 6.4E \n",fission_particles[data_dex].norm[2] );}
// set data in temp array since GRID-WISE threadsync cannot be done (easily?)!
fission_energy[ data_dex ] = sampled_E;
fission_particles[ data_dex ].x = this_x;
fission_particles[ data_dex ].y = this_y;
fission_particles[ data_dex ].z = this_z;
fission_particles[ data_dex ].xhat = sqrtf(1.0-(mu*mu))*cosf(phi);
fission_particles[ data_dex ].yhat = sqrtf(1.0-(mu*mu))*sinf(phi);
fission_particles[ data_dex ].zhat = mu;
fission_particles[ data_dex ].enforce_BC = 0;
fission_particles[ data_dex ].surf_dist = 999999.0;
//if(data_dex<=9){printf("array index %u, E = % 6.4E d_fissile_energy[ data_dex ] = % 6.4E\n",data_dex,sampled_E,E[ data_dex ]);}
}
// write current seed out
rn_bank[tid] = rn;
}
/**
 * \brief Launch the fission "pop" kernel that creates new neutrons at fission sites.
 * \details For every history with a nonzero yield, the kernel samples an emission energy
 *          (laws 4, 7, 9 and 11) and an isotropic direction, and writes the new particles into
 *          the intermediate fission_particles / fission_energy buffers at offsets given by the
 *          scanned yield array.
*
* @param[in] NUM_THREADS - the number of threads to run per thread block
* @param[in] N - the total number of threads to launch on the grid
* @param[in] d_xsdata - device pointer to cross section data pointer array
* @param[in] d_particles - device pointer to particle data pointer array
* @param[in] d_scanned - device pointer to array of the cumulative sum (scan) of the yield array, used to find final index where new particles will be written
 * @param[in] fission_particles - device pointer to intermediate spatial data array where popped values will be written
 * @param[in] fission_energy - device pointer to intermediate energy data array where popped values will be written
*/
void pop_fission( unsigned NUM_THREADS, unsigned N, cross_section_data* d_xsdata, particle_data* d_particles, unsigned* d_scanned, spatial_data* fission_particles, float* fission_energy ){
unsigned blks = ( N + NUM_THREADS - 1 ) / NUM_THREADS;
pop_fission_kernel <<< blks, NUM_THREADS >>> ( N, d_xsdata, d_particles, d_scanned, fission_particles, fission_energy);
check_cuda(cudaThreadSynchronize());
}
|
d03a1992ec879438c4ad24f126ca212f75f5b952.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#include <cmath>
#include <iostream>
namespace StreamCompaction {
namespace Naive {
#define SHOW_TIMING 0
int numBlocks, numThreads = 256;
int * dev_odata;
int * dev_idata;
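        // One step of the naive (Hillis-Steele) scan: every element at index >= jump adds in the
        // value 'jump' positions to its left; the host doubles 'jump' each pass.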
__global__ void scanStep(int n, int jump, int *odata, int *idata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index >= jump && index < n)
{
odata[index] = idata[index] + idata[index - jump];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
hipMalloc((void**)&dev_odata, n * sizeof(int));
hipMalloc((void**)&dev_idata, n * sizeof(int));
hipMemcpy(dev_odata, idata, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
numBlocks = n / numThreads + 1;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
int p=1;
for(int i=1; i< ilog2ceil(n)+1; ++i)
{
hipLaunchKernelGGL(( scanStep), dim3(numBlocks), dim3(numThreads), 0, 0, n, p, dev_odata, dev_idata);
p <<= 1;
hipMemcpy(dev_idata, dev_odata, n * sizeof(int), hipMemcpyDeviceToDevice);
}
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Total time in milliseconds : "<<milliseconds<<std::endl;
hipMemcpy(odata+1, dev_odata, (n-1) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_odata);
hipFree(dev_idata);
}
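/*
 * Illustrative usage sketch (added note, not part of the original source).
 * The kernel performs an inclusive Hillis-Steele scan on the device; copying
 * the result into odata+1 shifts it into an exclusive scan, and odata[0] is
 * never written by this routine, so it should already hold the identity (0):
 *
 *   int idata[4] = {3, 1, 7, 0};
 *   int odata[4] = {0, 0, 0, 0};   // odata[0] stays 0
 *   StreamCompaction::Naive::scan(4, odata, idata);
 *   // device inclusive scan: {3, 4, 11, 11}; odata becomes {0, 3, 4, 11}
 */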
}
}
| d03a1992ec879438c4ad24f126ca212f75f5b952.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#include <cmath>
#include <iostream>
namespace StreamCompaction {
namespace Naive {
#define SHOW_TIMING 0
int numBlocks, numThreads = 256;
int * dev_odata;
int * dev_idata;
__global__ void scanStep(int n, int jump, int *odata, int *idata)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if(index >= jump && index < n)
{
odata[index] = idata[index] + idata[index - jump];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
cudaMalloc((void**)&dev_odata, n * sizeof(int));
cudaMalloc((void**)&dev_idata, n * sizeof(int));
cudaMemcpy(dev_odata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
numBlocks = n / numThreads + 1;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
int p=1;
for(int i=1; i< ilog2ceil(n)+1; ++i)
{
scanStep<<<numBlocks, numThreads>>>(n, p, dev_odata, dev_idata);
p <<= 1;
cudaMemcpy(dev_idata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToDevice);
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
if(SHOW_TIMING)
std::cout<<"Total time in milliseconds : "<<milliseconds<<std::endl;
cudaMemcpy(odata+1, dev_odata, (n-1) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_odata);
cudaFree(dev_idata);
}
}
}
|
f3feddcd6a27b4940c7ba012e8ca34c5a8601c5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Implements the Landau kernel
*/
#include <petscconf.h>
#include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/
#include <petsclandau.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/aij/seq/aij.h>
#include <petscmat.h>
#include <petscdevice.h>
#include "../land_tensors.h"
#include <petscaijdevice.h>
#define CHECK_LAUNCH_ERROR() \
do { \
/* Check synchronous errors, i.e. pre-launch */ \
hipError_t err = hipGetLastError(); \
if (hipSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \
} \
/* Check asynchronous errors, i.e. kernel failed (ULF) */ \
err = hipDeviceSynchronize(); \
if (hipSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",hipGetErrorString(err)); \
} \
} while (0)
PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps *maps, pointInterpolationP4est (*points)[LANDAU_MAX_Q_FACE], PetscInt Nf, PetscInt Nq)
{
P4estVertexMaps h_maps;
hipError_t cerr;
PetscFunctionBegin;
h_maps.num_elements =maps->num_elements;
h_maps.num_face = maps->num_face;
h_maps.num_reduced = maps->num_reduced;
h_maps.deviceType = maps->deviceType;
h_maps.Nf = Nf;
h_maps.Nq = Nq;
cerr = hipMalloc((void **)&h_maps.c_maps, maps->num_reduced * sizeof *points);CHKERRCUDA(cerr);
cerr = hipMemcpy( h_maps.c_maps, maps->c_maps, maps->num_reduced * sizeof *points, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&h_maps.gIdx, maps->num_elements * sizeof *maps->gIdx);CHKERRCUDA(cerr);
cerr = hipMemcpy( h_maps.gIdx, maps->gIdx, maps->num_elements * sizeof *maps->gIdx, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&maps->data, sizeof(P4estVertexMaps));CHKERRCUDA(cerr);
cerr = hipMemcpy( maps->data, &h_maps, sizeof(P4estVertexMaps), hipMemcpyHostToDevice);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps *pMaps)
{
P4estVertexMaps *d_maps = pMaps->data, h_maps;
hipError_t cerr;
PetscFunctionBegin;
cerr = hipMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipFree(h_maps.c_maps);CHKERRCUDA(cerr);
cerr = hipFree(h_maps.gIdx);CHKERRCUDA(cerr);
cerr = hipFree(d_maps);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataSet(DM plex, const PetscInt Nq, PetscReal nu_alpha[], PetscReal nu_beta[], PetscReal a_invMass[], PetscReal a_invJ[], PetscReal a_mass_w[],
PetscReal a_x[], PetscReal a_y[], PetscReal a_z[], PetscReal a_w[], LandauGeomData *SData_d)
{
PetscErrorCode ierr;
PetscTabulation *Tf;
LandauCtx *ctx;
PetscInt *Nbf,dim,Nf,Nb,nip,cStart,cEnd,szf=sizeof(PetscReal),szs=sizeof(PetscScalar);
PetscDS prob;
hipError_t cerr;
PetscFunctionBegin;
ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr);
if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr);
ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
nip = (cEnd - cStart)*Nq;
ierr = DMGetDS(plex, &prob);CHKERRQ(ierr);
ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0];
if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. %D %D",Nq,Nb);
if (LANDAU_DIM != dim) SETERRQ2(PETSC_COMM_WORLD, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr);
{
cerr = hipMalloc((void **)&SData_d->B, Nq*Nb*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->B, Tf[0]->T[0], Nq*Nb*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->D, Nq*Nb*dim*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->D, Tf[0]->T[1], Nq*Nb*dim*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->mass_w, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->mass_w, a_mass_w,nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->alpha, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMalloc((void **)&SData_d->beta, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMalloc((void **)&SData_d->invMass, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy(SData_d->alpha, nu_alpha, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(SData_d->beta, nu_beta, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMemcpy(SData_d->invMass,a_invMass,Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
// collect geometry
cerr = hipMalloc((void **)&SData_d->invJ, nip*dim*dim*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy(SData_d->invJ, a_invJ, nip*dim*dim*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->x, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->x, a_x, nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->y, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->y, a_y, nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = hipMalloc((void **)&SData_d->z, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->z, a_z, nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
#endif
cerr = hipMalloc((void **)&SData_d->w, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = hipMemcpy( SData_d->w, a_w, nip*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
// allocate space for dynamic data once
cerr = hipMalloc((void **)&SData_d->Eq_m, Nf*szf);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->f, nip*Nf*szs);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->dfdx, nip*Nf*szs);CHKERRCUDA(cerr);
cerr = hipMalloc((void **)&SData_d->dfdy, nip*Nf*szs);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = hipMalloc((void **)&SData_d->dfdz, nip*Nf*szs);CHKERRCUDA(cerr); // kernel input
#endif
cerr = hipMalloc((void **)&SData_d->IPf, nip*Nf*szs);CHKERRCUDA(cerr); // Nq==Nb
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataClear(LandauGeomData *SData_d)
{
hipError_t cerr;
PetscFunctionBegin;
if (SData_d->alpha) {
cerr = hipFree(SData_d->alpha);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->beta);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->invMass);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->B);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->D);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->mass_w);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->invJ);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = hipFree(SData_d->z);CHKERRCUDA(cerr);
#endif
cerr = hipFree(SData_d->x);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->y);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->w);CHKERRCUDA(cerr);
// dynamic data
cerr = hipFree(SData_d->Eq_m);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->f);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->dfdx);CHKERRCUDA(cerr);
cerr = hipFree(SData_d->dfdy);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = hipFree(SData_d->dfdz);CHKERRCUDA(cerr);
#endif
if (SData_d->IPf) {
cerr = hipFree(SData_d->IPf);CHKERRCUDA(cerr);
}
}
PetscFunctionReturn(0);
}
// The GPU Landau kernel
//
__global__
void landau_form_fdf(const PetscInt nip, const PetscInt dim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJ_a[],
const PetscReal * const BB, const PetscReal * const DD, PetscScalar *a_coef, P4estVertexMaps *maps,
PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[]
#if LANDAU_DIM==3
, PetscReal d_dfdz[]
#endif
) // output
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = myQi + elem * Nq;
const PetscReal *invJ = &invJ_a[jpidx*dim*dim];
const PetscReal *Bq = &BB[myQi*Nb], *Dq = &DD[myQi*Nb*dim];
PetscInt f,d,b,e,q;
PetscReal u_x[LANDAU_MAX_SPECIES][LANDAU_DIM];
const PetscScalar *coef;
PetscScalar coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
if (!maps) {
coef = &a_coef[elem*Nb*Nf];
} else {
coef = coef_buff;
for (f = 0; f < Nf; ++f) {
LandauIdx *const Idxs = &maps->gIdx[elem][f][0];
for (b = 0; b < Nb; ++b) {
PetscInt idx = Idxs[b];
if (idx >= 0) {
coef_buff[f*Nb+b] = a_coef[idx];
} else {
idx = -idx - 1;
coef_buff[f*Nb+b] = 0;
for (q = 0; q < maps->num_face; q++) {
PetscInt id = maps->c_maps[idx][q].gid;
PetscScalar scale = maps->c_maps[idx][q].scale;
coef_buff[f*Nb+b] += scale*a_coef[id];
}
}
}
}
}
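/* note (added for clarity): a negative index in gIdx marks a constrained point
   (e.g. a hanging vertex from non-conforming refinement); its coefficient is
   reconstructed as a scaled sum over the num_face parent points in c_maps. */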
/* get f and df */
for (f = threadIdx.x; f < Nf; f += blockDim.x) {
PetscReal refSpaceDer[LANDAU_DIM];
d_f[jpidx + f*nip] = 0.0;
for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
for (b = 0; b < Nb; ++b) {
const PetscInt cidx = b;
d_f[jpidx + f*nip] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
}
for (d = 0; d < dim; ++d) {
for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) {
u_x[f][d] += invJ[e*dim+d]*refSpaceDer[e];
}
}
}
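/* note (added for clarity): refSpaceDer is the gradient with respect to the
   reference-cell coordinates; contracting it with invJ (the inverse Jacobian of
   the cell map) as u_x[d] = sum_e invJ[e*dim+d]*refSpaceDer[e] yields the
   physical-space gradient that is written to d_dfdx/d_dfdy below. */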
for (f = threadIdx.x; f < Nf; f += blockDim.x) {
d_dfdx[jpidx + f*nip] = u_x[f][0];
d_dfdy[jpidx + f*nip] = u_x[f][1];
#if LANDAU_DIM==3
d_dfdz[jpidx + f*nip] = u_x[f][2];
#endif
}
}
__device__ void
landau_inner_integral_v2(const PetscInt myQi, const PetscInt jpidx, PetscInt nip, const PetscInt Nq, const PetscInt Nf, const PetscInt Nb,
const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[],
const PetscReal invJj[], const PetscReal nu_alpha[],
const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[],
const PetscReal * const BB, const PetscReal * const DD,
PetscScalar *elemMat, P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat, // output
PetscScalar s_fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory
PetscReal s_scale[][LANDAU_MAX_Q_FACE],
PetscInt s_idx[][LANDAU_MAX_Q_FACE],
PetscReal s_g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_nu_alpha[],
PetscReal s_nu_beta[],
PetscReal s_invMass[],
PetscReal s_f[],
PetscReal s_dfx[],
PetscReal s_dfy[],
PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[], // global memory
#if LANDAU_DIM==3
const PetscReal zz[], PetscReal s_dfz[], PetscReal d_dfdz[],
#endif
PetscReal d_mass_w[], PetscReal shift, PetscInt elem)
{
int delta,d,f,g,d2,dp,d3,fieldA,ipidx_b,nip_pad = nip; // vectorization padding not supported;
PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
// create g2 & g3
for (f=threadIdx.x; f<Nf; f+=blockDim.x) {
for (d=0;d<dim;d++) { // clear accumulation data D & K
s_gg2[d][myQi][f] = 0;
for (d2=0;d2<dim;d2++) s_gg3[d][d2][myQi][f] = 0;
}
}
if (threadIdx.y == 0) {
for (int i = threadIdx.x; i < Nf; i += blockDim.x) {
s_nu_alpha[i] = nu_alpha[i];
s_nu_beta[i] = nu_beta[i];
s_invMass[i] = invMass[i];
}
}
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] = 0;
for (d3 = 0; d3 < dim; d3++) {
gg3_temp[d2][d3] = 0;
}
}
__syncthreads();
for (ipidx_b = 0; ipidx_b < nip; ipidx_b += blockDim.x) {
#if LANDAU_DIM==2
const PetscReal vj[3] = {xx[jpidx], yy[jpidx]};
#else
const PetscReal vj[3] = {xx[jpidx], yy[jpidx], zz[jpidx]};
#endif
int ipidx = ipidx_b + threadIdx.x;
__syncthreads();
if (ipidx < nip) {
for (fieldA = threadIdx.y; fieldA < Nf; fieldA += blockDim.y) {
s_f [fieldA*blockDim.x+threadIdx.x] = d_f[ipidx + fieldA*nip_pad];
s_dfx[fieldA*blockDim.x+threadIdx.x] = d_dfdx[ipidx + fieldA*nip_pad];
s_dfy[fieldA*blockDim.x+threadIdx.x] = d_dfdy[ipidx + fieldA*nip_pad];
#if LANDAU_DIM==3
s_dfz[fieldA*blockDim.x+threadIdx.x] = d_dfdz[ipidx + fieldA*nip_pad];
#endif
}
}
__syncthreads();
if (ipidx < nip) {
const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM==2
PetscReal Ud[2][2], Uk[2][2];
LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.);
#else
PetscReal U[3][3], z = zz[ipidx];
LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. : 1.);
#endif
for (fieldA = 0; fieldA < Nf; fieldA++) {
temp1[0] += s_dfx[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
temp1[1] += s_dfy[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
#if LANDAU_DIM==3
temp1[2] += s_dfz[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
#endif
temp2 += s_f [fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA];
}
temp1[0] *= wi;
temp1[1] *= wi;
#if LANDAU_DIM==3
temp1[2] *= wi;
#endif
temp2 *= wi;
#if LANDAU_DIM==2
for (d2 = 0; d2 < 2; d2++) {
for (d3 = 0; d3 < 2; ++d3) {
/* K = U * grad(f): g2=e: i,A */
gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
/* D = -U * (I \kron (fx)): g3=f: i,j,A */
gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
}
}
#else
for (d2 = 0; d2 < 3; ++d2) {
for (d3 = 0; d3 < 3; ++d3) {
/* K = U * grad(f): g2 = e: i,A */
gg2_temp[d2] += U[d2][d3]*temp1[d3];
/* D = -U * (I \kron (fx)): g3 = f: i,j,A */
gg3_temp[d2][d3] += U[d2][d3]*temp2;
}
}
#endif
}
} /* IPs */
/* reduce gg temp sums across threads */
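/* note (added for clarity): this is a butterfly reduction over the x-dimension of
   the thread block; __shfl_xor_sync exchanges values between lanes k and k^delta,
   so after log2(blockDim.x) passes (e.g. delta = 8, 4, 2, 1 for blockDim.x = 16)
   every participating lane holds the full sum. It relies on blockDim.x being a
   power of two no larger than the warp size, which the host code enforces when
   choosing the launch configuration. */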
for (delta = blockDim.x/2; delta > 0; delta /= 2) {
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x);
for (d3 = 0; d3 < dim; d3++) {
gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x);
}
}
}
// add alpha and put in gg2/3
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
for (d2 = 0; d2 < dim; d2++) {
s_gg2[d2][myQi][fieldA] += gg2_temp[d2]*s_nu_alpha[fieldA];
for (d3 = 0; d3 < dim; d3++) {
s_gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3]*s_nu_alpha[fieldA]*s_invMass[fieldA];
}
}
}
__syncthreads();
/* add electric field term once per IP */
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
s_gg2[dim-1][myQi][fieldA] += Eq_m[fieldA];
}
__syncthreads();
/* Jacobian transform - g2 */
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
PetscReal wj = ww[jpidx];
for (d = 0; d < dim; ++d) {
s_g2[d][myQi][fieldA] = 0.0;
for (d2 = 0; d2 < dim; ++d2) {
s_g2[d][myQi][fieldA] += invJj[d*dim+d2]*s_gg2[d2][myQi][fieldA];
s_g3[d][d2][myQi][fieldA] = 0.0;
for (d3 = 0; d3 < dim; ++d3) {
for (dp = 0; dp < dim; ++dp) {
s_g3[d][d2][myQi][fieldA] += invJj[d*dim + d3]*s_gg3[d3][dp][myQi][fieldA]*invJj[d2*dim + dp];
}
}
s_g3[d][d2][myQi][fieldA] *= wj;
}
s_g2[d][myQi][fieldA] *= wj;
}
}
__syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices
/* FE matrix construction */
{
int fieldA,d,qj,d2,q,idx,totDim=Nb*Nf;
/* assemble */
for (fieldA = 0; fieldA < Nf; fieldA++) {
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0 ; qj < Nq ; qj++) {
const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
for (d = 0; d < dim; ++d) {
t += DIq[f*dim+d]*s_g2[d][qj][fieldA]*BJq[g];
for (d2 = 0; d2 < dim; ++d2) {
t += DIq[f*dim + d]*s_g3[d][d2][qj][fieldA]*DIq[g*dim + d2];
}
}
}
if (elemMat) {
const PetscInt fOff = (fieldA*Nb + f)*totDim + fieldA*Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (s_fieldMats) {
PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE];
PetscInt nr,nc;
const LandauIdx *const Idxs = &d_maps->gIdx[elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb ; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps->num_face; q++) {
s_idx[f][q] = d_maps->c_maps[idx][q].gid;
s_scale[f][q] = d_maps->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) {
vals[q*nc + d] = s_scale[f][q]*s_scale[g][d]*s_fieldMats[f][g];
}
}
MatSetValuesDevice(d_mat,nr,s_idx[f],nc,s_idx[g],vals,ADD_VALUES);
}
}
__syncthreads();
}
}
}
}
//
// The CUDA Landau kernel
//
__global__
void __launch_bounds__(256,4) landau_kernel_v2(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJj[],
const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[],
const PetscReal * const BB, const PetscReal * const DD, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[],
PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat, PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM==3
const PetscReal zz[], PetscReal d_dfdz[],
#endif
PetscReal d_mass_w[], PetscReal shift)
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
extern __shared__ PetscReal smem[];
int size = 0;
PetscReal (*s_g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = // shared mem not needed when nu_alpha, etc
(PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM;
PetscReal (*s_g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES;
PetscReal (*s_gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM;
PetscReal (*s_gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES;
PetscReal *s_nu_alpha = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_nu_beta = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_invMass = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_f = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
PetscReal *s_dfx = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
PetscReal *s_dfy = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
#if LANDAU_DIM==3
PetscReal *s_dfz = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
#endif
PetscScalar (*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal (*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt (*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar (*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_NQ;
s_scale = (PetscReal (*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE;
s_idx = (PetscInt (*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = myQi + elem * Nq;
//const PetscInt subblocksz = nip/nSubBlks + !!(nip%nSubBlks), ip_start = mySubBlk*subblocksz, ip_end = (mySubBlk+1)*subblocksz > nip ? nip : (mySubBlk+1)*subblocksz; /* this could be wrong with very few global IPs */
PetscScalar *elemMat = elemMats_out ? &elemMats_out[elem*totDim*totDim] : NULL; /* my output */
int tid = threadIdx.x + threadIdx.y*blockDim.x;
const PetscReal *invJ = invJj ? &invJj[jpidx*dim*dim] : NULL;
if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0;
__syncthreads();
landau_inner_integral_v2(myQi, jpidx, nip, Nq, Nf, Nb, dim, xx, yy, ww,
invJ, nu_alpha, nu_beta, invMass, Eq_m, BB, DD,
elemMat, d_maps, d_mat,
*s_fieldMats, *s_scale, *s_idx,
*s_g2, *s_g3, *s_gg2, *s_gg3,
s_nu_alpha, s_nu_beta, s_invMass,
s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM==3
zz, s_dfz, d_dfdz,
#endif
d_mass_w, shift, elem);
}
__global__
void __launch_bounds__(256,4) mass_kernel(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal * const BB, const PetscReal * const DD,
PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat,
PetscReal d_mass_w[], PetscReal shift)
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
__shared__ PetscScalar s_fieldMats[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
__shared__ PetscInt s_idx[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
__shared__ PetscReal s_scale[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscScalar *elemMat = elemMats_out ? &elemMats_out[elem*totDim*totDim] : NULL; /* my output */
int fieldA,d,qj,q,idx,f,g;
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0;
__syncthreads();
/* FE mass matrix construction */
for (fieldA = 0; fieldA < Nf; fieldA++) {
PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE];
PetscInt nr,nc;
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0 ; qj < Nq ; qj++) {
const PetscReal *BJq = &BB[qj*Nb];
const PetscInt jpidx = qj + elem * Nq;
t += BJq[f] * d_mass_w[jpidx]*shift * BJq[g];
}
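/* note (added for clarity): this accumulates the standard finite-element mass
   entry M_{f,g} = sum_q B_q(f) * (mass_w_q * shift) * B_q(g) for the diagonal
   (fieldA, fieldA) block. */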
if (elemMat) {
const PetscInt fOff = (fieldA*Nb + f)*totDim + fieldA*Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (!elemMat) {
const LandauIdx *const Idxs = &d_maps->gIdx[elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb ; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps->num_face; q++) {
s_idx[f][q] = d_maps->c_maps[idx][q].gid;
s_scale[f][q] = d_maps->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) {
vals[q*nc + d] = s_scale[f][q]*s_scale[g][d]*s_fieldMats[f][g];
}
}
MatSetValuesDevice(d_mat,nr,s_idx[f],nc,s_idx[g],vals,ADD_VALUES);
}
}
}
__syncthreads();
}
}
PetscErrorCode LandauCUDAJacobian(DM plex, const PetscInt Nq, PetscReal a_Eq_m[], PetscScalar a_IPf[], const PetscInt N, const PetscScalar a_xarray[], LandauGeomData *SData_d, const PetscInt num_sub_blocks,
PetscReal shift, const PetscLogEvent events[], Mat JacP)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscInt ii,ej,*Nbf,Nb,cStart,cEnd,Nf,dim,numGCells,totDim,nip,szf=sizeof(PetscReal),szs=sizeof(PetscScalar);
PetscReal *d_BB=NULL,*d_DD=NULL,*d_invJj=NULL,*d_nu_alpha=NULL,*d_nu_beta=NULL,*d_invMass=NULL,*d_Eq_m=NULL,*d_mass_w=NULL,*d_x=NULL,*d_y=NULL,*d_w=NULL;
PetscScalar *d_elemMats=NULL,*d_IPf=NULL;
PetscReal *d_f=NULL,*d_dfdx=NULL,*d_dfdy=NULL;
#if LANDAU_DIM==3
PetscReal *d_dfdz=NULL, *d_z = NULL;
#endif
PetscTabulation *Tf;
PetscDS prob;
PetscSection section, globalSection;
LandauCtx *ctx;
PetscSplitCSRDataStructure d_mat=NULL;
P4estVertexMaps *h_maps, *d_maps=NULL;
int nnn = 256/Nq; // machine dependent
PetscFunctionBegin;
ierr = PetscLogEventBegin(events[3],0,0,0,0);CHKERRQ(ierr);
while (nnn & nnn - 1) nnn = nnn & nnn - 1;
if (nnn>16) nnn = 16;
ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr);
if (dim!=LANDAU_DIM) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %D != dim %d",LANDAU_DIM,dim);
ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
numGCells = cEnd - cStart;
nip = numGCells*Nq; /* length of inner global iteration */
ierr = DMGetDS(plex, &prob);CHKERRQ(ierr);
ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0];
if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. %D %D",Nq,Nb);
ierr = PetscDSGetTotalDimension(prob, &totDim);CHKERRQ(ierr);
ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr);
ierr = DMGetLocalSection(plex, &section);CHKERRQ(ierr);
ierr = DMGetGlobalSection(plex, &globalSection);CHKERRQ(ierr);
ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr);
if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
if (ctx->gpu_assembly) {
PetscContainer container;
ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr);
if (container) { // not here first call
ierr = PetscContainerGetPointer(container, (void **) &h_maps);CHKERRQ(ierr);
if (h_maps->data) {
d_maps = h_maps->data;
if (!d_maps) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata");
} else {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container");
}
// this does the setup the first time called
ierr = MatCUSPARSEGetDeviceMatWrite(JacP,&d_mat);CHKERRQ(ierr);
} else {
cerr = hipMalloc((void **)&d_elemMats, totDim*totDim*numGCells*szs);CHKERRCUDA(cerr); // kernel output - first call is on CPU
}
} else {
cerr = hipMalloc((void **)&d_elemMats, totDim*totDim*numGCells*szs);CHKERRCUDA(cerr); // kernel output - no GPU assembly
}
ierr = PetscLogEventEnd(events[3],0,0,0,0);CHKERRQ(ierr);
// create data
d_BB = (PetscReal*)SData_d->B;
d_DD = (PetscReal*)SData_d->D;
if (a_IPf || a_xarray) { // form f and df
dim3 dimBlock(nnn>Nf ? Nf : nnn, Nq);
ierr = PetscLogEventBegin(events[1],0,0,0,0);CHKERRQ(ierr);
cerr = hipMemcpy(SData_d->Eq_m, a_Eq_m, Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
if (a_IPf) {
cerr = hipMemcpy(SData_d->IPf, a_IPf, nip*Nf*szf, hipMemcpyHostToDevice);CHKERRCUDA(cerr);
d_IPf = (PetscScalar*)SData_d->IPf;
} else {
d_IPf = (PetscScalar*)a_xarray;
}
ierr = PetscLogEventEnd(events[1],0,0,0,0);CHKERRQ(ierr);
d_invJj = (PetscReal*)SData_d->invJ;
d_nu_alpha = (PetscReal*)SData_d->alpha;
d_nu_beta = (PetscReal*)SData_d->beta;
d_invMass = (PetscReal*)SData_d->invMass;
d_x = (PetscReal*)SData_d->x;
d_y = (PetscReal*)SData_d->y;
d_w = (PetscReal*)SData_d->w;
d_Eq_m = (PetscReal*)SData_d->Eq_m;
d_dfdx = (PetscReal*)SData_d->dfdx;
d_dfdy = (PetscReal*)SData_d->dfdy;
#if LANDAU_DIM==3
d_dfdz = (PetscReal*)SData_d->dfdz;
d_z = (PetscReal*)SData_d->z;
#endif
d_f = (PetscReal*)SData_d->f;
ierr = PetscLogEventBegin(events[8],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
hipLaunchKernelGGL(( landau_form_fdf), dim3(numGCells),dim3(dimBlock), 0, 0, nip, dim, Nf, Nb, d_invJj, d_BB, d_DD, d_IPf, d_maps, d_f, d_dfdx, d_dfdy
#if LANDAU_DIM==3
, d_dfdz
#endif
);
CHECK_LAUNCH_ERROR();
#if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr);
#else
ierr = PetscLogFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[8],0,0,0,0);CHKERRQ(ierr);
} else {
d_mass_w = (PetscReal*)SData_d->mass_w;
}
// kernel
{
dim3 dimBlock(nnn,Nq);
ierr = PetscLogEventBegin(events[4],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(a_IPf ? (nip*(11*Nf+ 4*dim*dim) + 6*Nf*dim*dim*dim + 10*Nf*dim*dim + 4*Nf*dim + Nb*Nf*Nb*Nq*dim*dim*5) : Nb*Nf*Nb*Nq*4));CHKERRQ(ierr);
if (!d_mass_w) {
ii = 2*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM*(1+LANDAU_DIM) + 3*LANDAU_MAX_SPECIES + (1+LANDAU_DIM)*dimBlock.x*LANDAU_MAX_SPECIES + LANDAU_MAX_NQ*LANDAU_MAX_NQ + 2*LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE;
if (ii*szf >= 49152) {
cerr = hipFuncSetAttribute(landau_kernel_v2,
hipFuncAttributeMaxDynamicSharedMemorySize,
98304);CHKERRCUDA(cerr);
}
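/* note (added for clarity): 49152 bytes (48 KiB) is the default per-block limit
   for dynamic shared memory on most NVIDIA GPUs; kernels needing more must opt
   in via the MaxDynamicSharedMemorySize attribute, here raised to 96 KiB. */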
ierr = PetscInfo1(plex, "Jacobian shared memory size: %D bytes\n",ii);CHKERRQ(ierr);
hipLaunchKernelGGL(( landau_kernel_v2), dim3(numGCells),dim3(dimBlock),ii*szf, 0, nip,dim,totDim,Nf,Nb,d_invJj,d_nu_alpha,d_nu_beta,d_invMass,d_Eq_m,
d_BB, d_DD, d_x, d_y, d_w,
d_elemMats, d_maps, d_mat, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM==3
d_z, d_dfdz,
#endif
d_mass_w, shift);
} else {
ierr = PetscInfo1(plex, "Mass no dynamic shared memory. d_maps = %p\n",d_maps);CHKERRQ(ierr);
hipLaunchKernelGGL(( mass_kernel), dim3(numGCells),dim3(dimBlock), 0, 0, nip, dim, totDim, Nf, Nb, d_BB, d_DD, d_elemMats,
d_maps, d_mat, d_mass_w, shift);
}
CHECK_LAUNCH_ERROR(); // has sync
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[4],0,0,0,0);CHKERRQ(ierr);
}
// First time assembly with or without GPU assembly
if (d_elemMats) {
PetscScalar *elemMats=NULL,*elMat;
ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr);
ierr = PetscMalloc1(totDim*totDim*numGCells,&elemMats);CHKERRQ(ierr);
cerr = hipMemcpy(elemMats, d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar), hipMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = hipFree(d_elemMats);CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogEventBegin(events[6],0,0,0,0);CHKERRQ(ierr);
for (ej = cStart, elMat = elemMats ; ej < cEnd; ++ej, elMat += totDim*totDim) {
ierr = DMPlexMatSetClosure(plex, section, globalSection, JacP, ej, elMat, ADD_VALUES);CHKERRQ(ierr);
if (ej==-1) {
int d,f;
PetscPrintf(PETSC_COMM_SELF,"GPU Element matrix\n");
for (d = 0; d < totDim; ++d) {
for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF," %12.5e", PetscRealPart(elMat[d*totDim + f]));
PetscPrintf(PETSC_COMM_SELF,"\n");
}
exit(14);
}
}
ierr = PetscFree(elemMats);CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[6],0,0,0,0);CHKERRQ(ierr);
if (ctx->gpu_assembly) {
// transition to use of maps for VecGetClosure
cerr = hipFree(SData_d->IPf);CHKERRCUDA(cerr);
SData_d->IPf = NULL;
if (!(a_IPf || a_xarray)) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "transition without Jacobian");
}
}
PetscFunctionReturn(0);
}
| f3feddcd6a27b4940c7ba012e8ca34c5a8601c5d.cu | /*
Implements the Landau kernel
*/
#include <petscconf.h>
#include <petsc/private/dmpleximpl.h> /*I "dmpleximpl.h" I*/
#include <petsclandau.h>
#define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1
#include <../src/mat/impls/aij/seq/aij.h>
#include <petscmat.h>
#include <petscdevice.h>
#include "../land_tensors.h"
#include <petscaijdevice.h>
#define CHECK_LAUNCH_ERROR() \
do { \
/* Check synchronous errors, i.e. pre-launch */ \
cudaError_t err = cudaGetLastError(); \
if (cudaSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
} \
/* Check asynchronous errors, i.e. kernel failed (ULF) */ \
err = cudaDeviceSynchronize(); \
if (cudaSuccess != err) { \
SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cuda error: %s",cudaGetErrorString(err)); \
} \
} while (0)
PETSC_EXTERN PetscErrorCode LandauCUDACreateMatMaps(P4estVertexMaps *maps, pointInterpolationP4est (*points)[LANDAU_MAX_Q_FACE], PetscInt Nf, PetscInt Nq)
{
P4estVertexMaps h_maps;
cudaError_t cerr;
PetscFunctionBegin;
h_maps.num_elements =maps->num_elements;
h_maps.num_face = maps->num_face;
h_maps.num_reduced = maps->num_reduced;
h_maps.deviceType = maps->deviceType;
h_maps.Nf = Nf;
h_maps.Nq = Nq;
cerr = cudaMalloc((void **)&h_maps.c_maps, maps->num_reduced * sizeof *points);CHKERRCUDA(cerr);
cerr = cudaMemcpy( h_maps.c_maps, maps->c_maps, maps->num_reduced * sizeof *points, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&h_maps.gIdx, maps->num_elements * sizeof *maps->gIdx);CHKERRCUDA(cerr);
cerr = cudaMemcpy( h_maps.gIdx, maps->gIdx, maps->num_elements * sizeof *maps->gIdx, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&maps->data, sizeof(P4estVertexMaps));CHKERRCUDA(cerr);
cerr = cudaMemcpy( maps->data, &h_maps, sizeof(P4estVertexMaps), cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode LandauCUDADestroyMatMaps(P4estVertexMaps *pMaps)
{
P4estVertexMaps *d_maps = pMaps->data, h_maps;
cudaError_t cerr;
PetscFunctionBegin;
cerr = cudaMemcpy(&h_maps, d_maps, sizeof(P4estVertexMaps), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaFree(h_maps.c_maps);CHKERRCUDA(cerr);
cerr = cudaFree(h_maps.gIdx);CHKERRCUDA(cerr);
cerr = cudaFree(d_maps);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataSet(DM plex, const PetscInt Nq, PetscReal nu_alpha[], PetscReal nu_beta[], PetscReal a_invMass[], PetscReal a_invJ[], PetscReal a_mass_w[],
PetscReal a_x[], PetscReal a_y[], PetscReal a_z[], PetscReal a_w[], LandauGeomData *SData_d)
{
PetscErrorCode ierr;
PetscTabulation *Tf;
LandauCtx *ctx;
PetscInt *Nbf,dim,Nf,Nb,nip,cStart,cEnd,szf=sizeof(PetscReal),szs=sizeof(PetscScalar);
PetscDS prob;
cudaError_t cerr;
PetscFunctionBegin;
ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr);
if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr);
ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
nip = (cEnd - cStart)*Nq;
ierr = DMGetDS(plex, &prob);CHKERRQ(ierr);
ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0];
if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. %D %D",Nq,Nb);
if (LANDAU_DIM != dim) SETERRQ2(PETSC_COMM_WORLD, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr);
{
cerr = cudaMalloc((void **)&SData_d->B, Nq*Nb*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->B, Tf[0]->T[0], Nq*Nb*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->D, Nq*Nb*dim*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->D, Tf[0]->T[1], Nq*Nb*dim*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->mass_w, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->mass_w, a_mass_w,nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->alpha, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMalloc((void **)&SData_d->beta, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMalloc((void **)&SData_d->invMass, Nf*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy(SData_d->alpha, nu_alpha, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(SData_d->beta, nu_beta, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMemcpy(SData_d->invMass,a_invMass,Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
// collect geometry
cerr = cudaMalloc((void **)&SData_d->invJ, nip*dim*dim*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy(SData_d->invJ, a_invJ, nip*dim*dim*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->x, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->x, a_x, nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->y, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->y, a_y, nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = cudaMalloc((void **)&SData_d->z, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->z, a_z, nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
#endif
cerr = cudaMalloc((void **)&SData_d->w, nip*szf);CHKERRCUDA(cerr); // kernel input
cerr = cudaMemcpy( SData_d->w, a_w, nip*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
// allocate space for dynamic data once
cerr = cudaMalloc((void **)&SData_d->Eq_m, Nf*szf);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->f, nip*Nf*szs);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->dfdx, nip*Nf*szs);CHKERRCUDA(cerr);
cerr = cudaMalloc((void **)&SData_d->dfdy, nip*Nf*szs);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = cudaMalloc((void **)&SData_d->dfdz, nip*Nf*szs);CHKERRCUDA(cerr); // kernel input
#endif
cerr = cudaMalloc((void **)&SData_d->IPf, nip*Nf*szs);CHKERRCUDA(cerr); // Nq==Nb
}
PetscFunctionReturn(0);
}
PetscErrorCode LandauCUDAStaticDataClear(LandauGeomData *SData_d)
{
cudaError_t cerr;
PetscFunctionBegin;
if (SData_d->alpha) {
cerr = cudaFree(SData_d->alpha);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->beta);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->invMass);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->B);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->D);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->mass_w);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->invJ);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = cudaFree(SData_d->z);CHKERRCUDA(cerr);
#endif
cerr = cudaFree(SData_d->x);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->y);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->w);CHKERRCUDA(cerr);
// dynamic data
cerr = cudaFree(SData_d->Eq_m);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->f);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->dfdx);CHKERRCUDA(cerr);
cerr = cudaFree(SData_d->dfdy);CHKERRCUDA(cerr);
#if LANDAU_DIM==3
cerr = cudaFree(SData_d->dfdz);CHKERRCUDA(cerr);
#endif
if (SData_d->IPf) {
cerr = cudaFree(SData_d->IPf);CHKERRCUDA(cerr);
}
}
PetscFunctionReturn(0);
}
// The GPU Landau kernel
//
__global__
void landau_form_fdf(const PetscInt nip, const PetscInt dim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJ_a[],
const PetscReal * const BB, const PetscReal * const DD, PetscScalar *a_coef, P4estVertexMaps *maps,
PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[]
#if LANDAU_DIM==3
, PetscReal d_dfdz[]
#endif
) // output
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = myQi + elem * Nq;
const PetscReal *invJ = &invJ_a[jpidx*dim*dim];
const PetscReal *Bq = &BB[myQi*Nb], *Dq = &DD[myQi*Nb*dim];
PetscInt f,d,b,e,q;
PetscReal u_x[LANDAU_MAX_SPECIES][LANDAU_DIM];
const PetscScalar *coef;
PetscScalar coef_buff[LANDAU_MAX_SPECIES*LANDAU_MAX_NQ];
if (!maps) {
coef = &a_coef[elem*Nb*Nf];
} else {
coef = coef_buff;
for (f = 0; f < Nf; ++f) {
LandauIdx *const Idxs = &maps->gIdx[elem][f][0];
for (b = 0; b < Nb; ++b) {
PetscInt idx = Idxs[b];
if (idx >= 0) {
coef_buff[f*Nb+b] = a_coef[idx];
} else {
idx = -idx - 1;
coef_buff[f*Nb+b] = 0;
for (q = 0; q < maps->num_face; q++) {
PetscInt id = maps->c_maps[idx][q].gid;
PetscScalar scale = maps->c_maps[idx][q].scale;
coef_buff[f*Nb+b] += scale*a_coef[id];
}
}
}
}
}
/* get f and df */
for (f = threadIdx.x; f < Nf; f += blockDim.x) {
PetscReal refSpaceDer[LANDAU_DIM];
d_f[jpidx + f*nip] = 0.0;
for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
for (b = 0; b < Nb; ++b) {
const PetscInt cidx = b;
d_f[jpidx + f*nip] += Bq[cidx]*PetscRealPart(coef[f*Nb+cidx]);
for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*PetscRealPart(coef[f*Nb+cidx]);
}
for (d = 0; d < dim; ++d) {
for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) {
u_x[f][d] += invJ[e*dim+d]*refSpaceDer[e];
}
}
}
for (f = threadIdx.x; f < Nf; f += blockDim.x) {
d_dfdx[jpidx + f*nip] = u_x[f][0];
d_dfdy[jpidx + f*nip] = u_x[f][1];
#if LANDAU_DIM==3
d_dfdz[jpidx + f*nip] = u_x[f][2];
#endif
}
}
__device__ void
landau_inner_integral_v2(const PetscInt myQi, const PetscInt jpidx, PetscInt nip, const PetscInt Nq, const PetscInt Nf, const PetscInt Nb,
const PetscInt dim, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[],
const PetscReal invJj[], const PetscReal nu_alpha[],
const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[],
const PetscReal * const BB, const PetscReal * const DD,
PetscScalar *elemMat, P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat, // output
PetscScalar s_fieldMats[][LANDAU_MAX_NQ], // all these arrays are in shared memory
PetscReal s_scale[][LANDAU_MAX_Q_FACE],
PetscInt s_idx[][LANDAU_MAX_Q_FACE],
PetscReal s_g2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_g3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_gg2[][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_gg3[][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES],
PetscReal s_nu_alpha[],
PetscReal s_nu_beta[],
PetscReal s_invMass[],
PetscReal s_f[],
PetscReal s_dfx[],
PetscReal s_dfy[],
PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[], // global memory
#if LANDAU_DIM==3
const PetscReal zz[], PetscReal s_dfz[], PetscReal d_dfdz[],
#endif
PetscReal d_mass_w[], PetscReal shift, PetscInt elem)
{
int delta,d,f,g,d2,dp,d3,fieldA,ipidx_b,nip_pad = nip; // vectorization padding not supported;
PetscReal gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
// create g2 & g3
for (f=threadIdx.x; f<Nf; f+=blockDim.x) {
for (d=0;d<dim;d++) { // clear accumulation data D & K
s_gg2[d][myQi][f] = 0;
for (d2=0;d2<dim;d2++) s_gg3[d][d2][myQi][f] = 0;
}
}
if (threadIdx.y == 0) {
for (int i = threadIdx.x; i < Nf; i += blockDim.x) {
s_nu_alpha[i] = nu_alpha[i];
s_nu_beta[i] = nu_beta[i];
s_invMass[i] = invMass[i];
}
}
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] = 0;
for (d3 = 0; d3 < dim; d3++) {
gg3_temp[d2][d3] = 0;
}
}
__syncthreads();
for (ipidx_b = 0; ipidx_b < nip; ipidx_b += blockDim.x) {
#if LANDAU_DIM==2
const PetscReal vj[3] = {xx[jpidx], yy[jpidx]};
#else
const PetscReal vj[3] = {xx[jpidx], yy[jpidx], zz[jpidx]};
#endif
int ipidx = ipidx_b + threadIdx.x;
__syncthreads();
if (ipidx < nip) {
for (fieldA = threadIdx.y; fieldA < Nf; fieldA += blockDim.y) {
s_f [fieldA*blockDim.x+threadIdx.x] = d_f[ipidx + fieldA*nip_pad];
s_dfx[fieldA*blockDim.x+threadIdx.x] = d_dfdx[ipidx + fieldA*nip_pad];
s_dfy[fieldA*blockDim.x+threadIdx.x] = d_dfdy[ipidx + fieldA*nip_pad];
#if LANDAU_DIM==3
s_dfz[fieldA*blockDim.x+threadIdx.x] = d_dfdz[ipidx + fieldA*nip_pad];
#endif
}
}
__syncthreads();
if (ipidx < nip) {
const PetscReal wi = ww[ipidx], x = xx[ipidx], y = yy[ipidx];
PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
#if LANDAU_DIM==2
PetscReal Ud[2][2], Uk[2][2];
LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.);
#else
PetscReal U[3][3], z = zz[ipidx];
LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. : 1.);
#endif
for (fieldA = 0; fieldA < Nf; fieldA++) {
temp1[0] += s_dfx[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
temp1[1] += s_dfy[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
#if LANDAU_DIM==3
temp1[2] += s_dfz[fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA]*s_invMass[fieldA];
#endif
temp2 += s_f [fieldA*blockDim.x+threadIdx.x]*s_nu_beta[fieldA];
}
temp1[0] *= wi;
temp1[1] *= wi;
#if LANDAU_DIM==3
temp1[2] *= wi;
#endif
temp2 *= wi;
#if LANDAU_DIM==2
for (d2 = 0; d2 < 2; d2++) {
for (d3 = 0; d3 < 2; ++d3) {
/* K = U * grad(f): g2=e: i,A */
gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
/* D = -U * (I \kron (fx)): g3=f: i,j,A */
gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
}
}
#else
for (d2 = 0; d2 < 3; ++d2) {
for (d3 = 0; d3 < 3; ++d3) {
/* K = U * grad(f): g2 = e: i,A */
gg2_temp[d2] += U[d2][d3]*temp1[d3];
/* D = -U * (I \kron (fx)): g3 = f: i,j,A */
gg3_temp[d2][d3] += U[d2][d3]*temp2;
}
}
#endif
}
} /* IPs */
/* reduce gg temp sums across threads */
for (delta = blockDim.x/2; delta > 0; delta /= 2) {
for (d2 = 0; d2 < dim; d2++) {
gg2_temp[d2] += __shfl_xor_sync(0xffffffff, gg2_temp[d2], delta, blockDim.x);
for (d3 = 0; d3 < dim; d3++) {
gg3_temp[d2][d3] += __shfl_xor_sync(0xffffffff, gg3_temp[d2][d3], delta, blockDim.x);
}
}
}
// add alpha and put in gg2/3
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
for (d2 = 0; d2 < dim; d2++) {
s_gg2[d2][myQi][fieldA] += gg2_temp[d2]*s_nu_alpha[fieldA];
for (d3 = 0; d3 < dim; d3++) {
s_gg3[d2][d3][myQi][fieldA] -= gg3_temp[d2][d3]*s_nu_alpha[fieldA]*s_invMass[fieldA];
}
}
}
__syncthreads();
/* add electric field term once per IP */
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
s_gg2[dim-1][myQi][fieldA] += Eq_m[fieldA];
}
__syncthreads();
/* Jacobian transform - g2 */
for (fieldA = threadIdx.x; fieldA < Nf; fieldA += blockDim.x) {
PetscReal wj = ww[jpidx];
for (d = 0; d < dim; ++d) {
s_g2[d][myQi][fieldA] = 0.0;
for (d2 = 0; d2 < dim; ++d2) {
s_g2[d][myQi][fieldA] += invJj[d*dim+d2]*s_gg2[d2][myQi][fieldA];
s_g3[d][d2][myQi][fieldA] = 0.0;
for (d3 = 0; d3 < dim; ++d3) {
for (dp = 0; dp < dim; ++dp) {
s_g3[d][d2][myQi][fieldA] += invJj[d*dim + d3]*s_gg3[d3][dp][myQi][fieldA]*invJj[d2*dim + dp];
}
}
s_g3[d][d2][myQi][fieldA] *= wj;
}
s_g2[d][myQi][fieldA] *= wj;
}
}
__syncthreads(); // Synchronize (ensure all the data is available) and sum IP matrices
/* FE matrix construction */
{
int fieldA,d,qj,d2,q,idx,totDim=Nb*Nf;
/* assemble */
for (fieldA = 0; fieldA < Nf; fieldA++) {
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0 ; qj < Nq ; qj++) {
const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
for (d = 0; d < dim; ++d) {
t += DIq[f*dim+d]*s_g2[d][qj][fieldA]*BJq[g];
for (d2 = 0; d2 < dim; ++d2) {
t += DIq[f*dim + d]*s_g3[d][d2][qj][fieldA]*DIq[g*dim + d2];
}
}
}
if (elemMat) {
const PetscInt fOff = (fieldA*Nb + f)*totDim + fieldA*Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (s_fieldMats) {
PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE];
PetscInt nr,nc;
const LandauIdx *const Idxs = &d_maps->gIdx[elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb ; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps->num_face; q++) {
s_idx[f][q] = d_maps->c_maps[idx][q].gid;
s_scale[f][q] = d_maps->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) {
vals[q*nc + d] = s_scale[f][q]*s_scale[g][d]*s_fieldMats[f][g];
}
}
MatSetValuesDevice(d_mat,nr,s_idx[f],nc,s_idx[g],vals,ADD_VALUES);
}
}
__syncthreads();
}
}
}
}
//
// The CUDA Landau kernel
//
__global__
void __launch_bounds__(256,4) landau_kernel_v2(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal invJj[],
const PetscReal nu_alpha[], const PetscReal nu_beta[], const PetscReal invMass[], const PetscReal Eq_m[],
const PetscReal * const BB, const PetscReal * const DD, const PetscReal xx[], const PetscReal yy[], const PetscReal ww[],
PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat, PetscReal d_f[], PetscReal d_dfdx[], PetscReal d_dfdy[],
#if LANDAU_DIM==3
const PetscReal zz[], PetscReal d_dfdz[],
#endif
PetscReal d_mass_w[], PetscReal shift)
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
extern __shared__ PetscReal smem[];
int size = 0;
PetscReal (*s_g2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] = // shared mem not needed when nu_alpha, etc
(PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM;
PetscReal (*s_g3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES;
PetscReal (*s_gg2)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM;
PetscReal (*s_gg3)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES] =
(PetscReal (*)[LANDAU_DIM][LANDAU_DIM][LANDAU_MAX_NQ][LANDAU_MAX_SPECIES]) &smem[size];
size += LANDAU_DIM*LANDAU_DIM*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES;
PetscReal *s_nu_alpha = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_nu_beta = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_invMass = &smem[size];
size += LANDAU_MAX_SPECIES;
PetscReal *s_f = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
PetscReal *s_dfx = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
PetscReal *s_dfy = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
#if LANDAU_DIM==3
PetscReal *s_dfz = &smem[size];
size += blockDim.x*LANDAU_MAX_SPECIES;
#endif
PetscScalar (*s_fieldMats)[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
PetscReal (*s_scale)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscInt (*s_idx)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
if (d_maps) {
// reuse the space for fieldMats
s_fieldMats = (PetscScalar (*)[LANDAU_MAX_NQ][LANDAU_MAX_NQ]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_NQ;
s_scale = (PetscReal (*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE;
s_idx = (PetscInt (*)[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE]) &smem[size];
size += LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE; // this is too big, idx is an integer
} else {
s_fieldMats = NULL;
}
const PetscInt myQi = threadIdx.y;
const PetscInt jpidx = myQi + elem * Nq;
//const PetscInt subblocksz = nip/nSubBlks + !!(nip%nSubBlks), ip_start = mySubBlk*subblocksz, ip_end = (mySubBlk+1)*subblocksz > nip ? nip : (mySubBlk+1)*subblocksz; /* this could be wrong with very few global IPs */
PetscScalar *elemMat = elemMats_out ? &elemMats_out[elem*totDim*totDim] : NULL; /* my output */
int tid = threadIdx.x + threadIdx.y*blockDim.x;
const PetscReal *invJ = invJj ? &invJj[jpidx*dim*dim] : NULL;
if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0;
__syncthreads();
landau_inner_integral_v2(myQi, jpidx, nip, Nq, Nf, Nb, dim, xx, yy, ww,
invJ, nu_alpha, nu_beta, invMass, Eq_m, BB, DD,
elemMat, d_maps, d_mat,
*s_fieldMats, *s_scale, *s_idx,
*s_g2, *s_g3, *s_gg2, *s_gg3,
s_nu_alpha, s_nu_beta, s_invMass,
s_f, s_dfx, s_dfy, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM==3
zz, s_dfz, d_dfdz,
#endif
d_mass_w, shift, elem);
}
__global__
void __launch_bounds__(256,4) mass_kernel(const PetscInt nip, const PetscInt dim, const PetscInt totDim, const PetscInt Nf, const PetscInt Nb, const PetscReal * const BB, const PetscReal * const DD,
PetscScalar elemMats_out[], P4estVertexMaps *d_maps, PetscSplitCSRDataStructure d_mat,
PetscReal d_mass_w[], PetscReal shift)
{
const PetscInt Nq = blockDim.y, elem = blockIdx.x;
__shared__ PetscScalar s_fieldMats[LANDAU_MAX_NQ][LANDAU_MAX_NQ];
__shared__ PetscInt s_idx[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
__shared__ PetscReal s_scale[LANDAU_MAX_NQ][LANDAU_MAX_Q_FACE];
PetscScalar *elemMat = elemMats_out ? &elemMats_out[elem*totDim*totDim] : NULL; /* my output */
int fieldA,d,qj,q,idx,f,g;
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if (elemMat) for (int i = tid; i < totDim*totDim; i += blockDim.x*blockDim.y) elemMat[i] = 0;
__syncthreads();
/* FE mass matrix construction */
for (fieldA = 0; fieldA < Nf; fieldA++) {
PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE];
PetscInt nr,nc;
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
PetscScalar t = 0;
for (qj = 0 ; qj < Nq ; qj++) {
const PetscReal *BJq = &BB[qj*Nb];
const PetscInt jpidx = qj + elem * Nq;
t += BJq[f] * d_mass_w[jpidx]*shift * BJq[g];
}
if (elemMat) {
const PetscInt fOff = (fieldA*Nb + f)*totDim + fieldA*Nb + g;
elemMat[fOff] += t; // ????
} else s_fieldMats[f][g] = t;
}
}
if (!elemMat) {
const LandauIdx *const Idxs = &d_maps->gIdx[elem][fieldA][0];
__syncthreads();
if (threadIdx.y == 0) {
for (f = threadIdx.x; f < Nb ; f += blockDim.x) {
idx = Idxs[f];
if (idx >= 0) {
s_idx[f][0] = idx;
s_scale[f][0] = 1.;
} else {
idx = -idx - 1;
for (q = 0; q < d_maps->num_face; q++) {
s_idx[f][q] = d_maps->c_maps[idx][q].gid;
s_scale[f][q] = d_maps->c_maps[idx][q].scale;
}
}
}
}
__syncthreads();
for (f = threadIdx.y; f < Nb ; f += blockDim.y) {
idx = Idxs[f];
if (idx >= 0) {
nr = 1;
} else {
nr = d_maps->num_face;
}
for (g = threadIdx.x; g < Nb; g += blockDim.x) {
idx = Idxs[g];
if (idx >= 0) {
nc = 1;
} else {
nc = d_maps->num_face;
}
for (q = 0; q < nr; q++) {
for (d = 0; d < nc; d++) {
vals[q*nc + d] = s_scale[f][q]*s_scale[g][d]*s_fieldMats[f][g];
}
}
MatSetValuesDevice(d_mat,nr,s_idx[f],nc,s_idx[g],vals,ADD_VALUES);
}
}
}
__syncthreads();
}
}
PetscErrorCode LandauCUDAJacobian(DM plex, const PetscInt Nq, PetscReal a_Eq_m[], PetscScalar a_IPf[], const PetscInt N, const PetscScalar a_xarray[], LandauGeomData *SData_d, const PetscInt num_sub_blocks,
PetscReal shift, const PetscLogEvent events[], Mat JacP)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt ii,ej,*Nbf,Nb,cStart,cEnd,Nf,dim,numGCells,totDim,nip,szf=sizeof(PetscReal),szs=sizeof(PetscScalar);
PetscReal *d_BB=NULL,*d_DD=NULL,*d_invJj=NULL,*d_nu_alpha=NULL,*d_nu_beta=NULL,*d_invMass=NULL,*d_Eq_m=NULL,*d_mass_w=NULL,*d_x=NULL,*d_y=NULL,*d_w=NULL;
PetscScalar *d_elemMats=NULL,*d_IPf=NULL;
PetscReal *d_f=NULL,*d_dfdx=NULL,*d_dfdy=NULL;
#if LANDAU_DIM==3
PetscReal *d_dfdz=NULL, *d_z = NULL;
#endif
PetscTabulation *Tf;
PetscDS prob;
PetscSection section, globalSection;
LandauCtx *ctx;
PetscSplitCSRDataStructure d_mat=NULL;
P4estVertexMaps *h_maps, *d_maps=NULL;
int nnn = 256/Nq; // machine dependent
PetscFunctionBegin;
ierr = PetscLogEventBegin(events[3],0,0,0,0);CHKERRQ(ierr);
while (nnn & nnn - 1) nnn = nnn & nnn - 1;
if (nnn>16) nnn = 16;
ierr = DMGetDimension(plex, &dim);CHKERRQ(ierr);
if (dim!=LANDAU_DIM) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "LANDAU_DIM %D != dim %d",LANDAU_DIM,dim);
ierr = DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);CHKERRQ(ierr);
numGCells = cEnd - cStart;
nip = numGCells*Nq; /* length of inner global iteration */
ierr = DMGetDS(plex, &prob);CHKERRQ(ierr);
ierr = PetscDSGetNumFields(prob, &Nf);CHKERRQ(ierr);
ierr = PetscDSGetDimensions(prob, &Nbf);CHKERRQ(ierr); Nb = Nbf[0];
if (Nq != Nb) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Nq != Nb. %D %D",Nq,Nb);
ierr = PetscDSGetTotalDimension(prob, &totDim);CHKERRQ(ierr);
ierr = PetscDSGetTabulation(prob, &Tf);CHKERRQ(ierr);
ierr = DMGetLocalSection(plex, &section);CHKERRQ(ierr);
ierr = DMGetGlobalSection(plex, &globalSection);CHKERRQ(ierr);
ierr = DMGetApplicationContext(plex, &ctx);CHKERRQ(ierr);
if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
if (ctx->gpu_assembly) {
PetscContainer container;
ierr = PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);CHKERRQ(ierr);
if (container) { // not here first call
ierr = PetscContainerGetPointer(container, (void **) &h_maps);CHKERRQ(ierr);
if (h_maps->data) {
d_maps = h_maps->data;
if (!d_maps) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata");
} else {
SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "GPU assembly but no metadata in container");
}
// this does the setup the first time called
ierr = MatCUSPARSEGetDeviceMatWrite(JacP,&d_mat);CHKERRQ(ierr);
} else {
cerr = cudaMalloc((void **)&d_elemMats, totDim*totDim*numGCells*szs);CHKERRCUDA(cerr); // kernel output - first call is on CPU
}
} else {
cerr = cudaMalloc((void **)&d_elemMats, totDim*totDim*numGCells*szs);CHKERRCUDA(cerr); // kernel output - no GPU assembly
}
ierr = PetscLogEventEnd(events[3],0,0,0,0);CHKERRQ(ierr);
// create data
d_BB = (PetscReal*)SData_d->B;
d_DD = (PetscReal*)SData_d->D;
if (a_IPf || a_xarray) { // form f and df
dim3 dimBlock(nnn>Nf ? Nf : nnn, Nq);
ierr = PetscLogEventBegin(events[1],0,0,0,0);CHKERRQ(ierr);
cerr = cudaMemcpy(SData_d->Eq_m, a_Eq_m, Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
if (a_IPf) {
cerr = cudaMemcpy(SData_d->IPf, a_IPf, nip*Nf*szf, cudaMemcpyHostToDevice);CHKERRCUDA(cerr);
d_IPf = (PetscScalar*)SData_d->IPf;
} else {
d_IPf = (PetscScalar*)a_xarray;
}
ierr = PetscLogEventEnd(events[1],0,0,0,0);CHKERRQ(ierr);
d_invJj = (PetscReal*)SData_d->invJ;
d_nu_alpha = (PetscReal*)SData_d->alpha;
d_nu_beta = (PetscReal*)SData_d->beta;
d_invMass = (PetscReal*)SData_d->invMass;
d_x = (PetscReal*)SData_d->x;
d_y = (PetscReal*)SData_d->y;
d_w = (PetscReal*)SData_d->w;
d_Eq_m = (PetscReal*)SData_d->Eq_m;
d_dfdx = (PetscReal*)SData_d->dfdx;
d_dfdy = (PetscReal*)SData_d->dfdy;
#if LANDAU_DIM==3
d_dfdz = (PetscReal*)SData_d->dfdz;
d_z = (PetscReal*)SData_d->z;
#endif
d_f = (PetscReal*)SData_d->f;
ierr = PetscLogEventBegin(events[8],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
landau_form_fdf<<<numGCells,dimBlock>>>( nip, dim, Nf, Nb, d_invJj, d_BB, d_DD, d_IPf, d_maps, d_f, d_dfdx, d_dfdy
#if LANDAU_DIM==3
, d_dfdz
#endif
);
CHECK_LAUNCH_ERROR();
#if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_HIP)
ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr);
#else
ierr = PetscLogFlops(nip*(PetscLogDouble)(2*Nb*(1+dim)));CHKERRQ(ierr);
#endif
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[8],0,0,0,0);CHKERRQ(ierr);
} else {
d_mass_w = (PetscReal*)SData_d->mass_w;
}
// kernel
{
dim3 dimBlock(nnn,Nq);
ierr = PetscLogEventBegin(events[4],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr);
ierr = PetscLogGpuFlops(nip*(PetscLogDouble)(a_IPf ? (nip*(11*Nf+ 4*dim*dim) + 6*Nf*dim*dim*dim + 10*Nf*dim*dim + 4*Nf*dim + Nb*Nf*Nb*Nq*dim*dim*5) : Nb*Nf*Nb*Nq*4));CHKERRQ(ierr);
if (!d_mass_w) {
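// ii = number of PetscReal entries of dynamic shared memory the kernel needs; the default per-block limit is 48 KB (49152 bytes), so opt in to the 96 KB carveout below when the request exceeds it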
ii = 2*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_DIM*(1+LANDAU_DIM) + 3*LANDAU_MAX_SPECIES + (1+LANDAU_DIM)*dimBlock.x*LANDAU_MAX_SPECIES + LANDAU_MAX_NQ*LANDAU_MAX_NQ + 2*LANDAU_MAX_NQ*LANDAU_MAX_Q_FACE;
if (ii*szf >= 49152) {
cerr = cudaFuncSetAttribute(landau_kernel_v2,
cudaFuncAttributeMaxDynamicSharedMemorySize,
98304);CHKERRCUDA(cerr);
}
ierr = PetscInfo1(plex, "Jacobian shared memory size: %D bytes\n",ii);CHKERRQ(ierr);
landau_kernel_v2<<<numGCells,dimBlock,ii*szf>>>(nip,dim,totDim,Nf,Nb,d_invJj,d_nu_alpha,d_nu_beta,d_invMass,d_Eq_m,
d_BB, d_DD, d_x, d_y, d_w,
d_elemMats, d_maps, d_mat, d_f, d_dfdx, d_dfdy,
#if LANDAU_DIM==3
d_z, d_dfdz,
#endif
d_mass_w, shift);
} else {
ierr = PetscInfo1(plex, "Mass no dynamic shared memory. d_maps = %p\n",d_maps);CHKERRQ(ierr);
mass_kernel<<<numGCells,dimBlock>>>(nip, dim, totDim, Nf, Nb, d_BB, d_DD, d_elemMats,
d_maps, d_mat, d_mass_w, shift);
}
CHECK_LAUNCH_ERROR(); // has sync
ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[4],0,0,0,0);CHKERRQ(ierr);
}
// First time assembly with or without GPU assembly
if (d_elemMats) {
PetscScalar *elemMats=NULL,*elMat;
ierr = PetscLogEventBegin(events[5],0,0,0,0);CHKERRQ(ierr);
ierr = PetscMalloc1(totDim*totDim*numGCells,&elemMats);CHKERRQ(ierr);
cerr = cudaMemcpy(elemMats, d_elemMats, totDim*totDim*numGCells*sizeof(PetscScalar), cudaMemcpyDeviceToHost);CHKERRCUDA(cerr);
cerr = cudaFree(d_elemMats);CHKERRCUDA(cerr);
ierr = PetscLogEventEnd(events[5],0,0,0,0);CHKERRQ(ierr);
ierr = PetscLogEventBegin(events[6],0,0,0,0);CHKERRQ(ierr);
for (ej = cStart, elMat = elemMats ; ej < cEnd; ++ej, elMat += totDim*totDim) {
ierr = DMPlexMatSetClosure(plex, section, globalSection, JacP, ej, elMat, ADD_VALUES);CHKERRQ(ierr);
if (ej==-1) {
int d,f;
PetscPrintf(PETSC_COMM_SELF,"GPU Element matrix\n");
for (d = 0; d < totDim; ++d) {
for (f = 0; f < totDim; ++f) PetscPrintf(PETSC_COMM_SELF," %12.5e", PetscRealPart(elMat[d*totDim + f]));
PetscPrintf(PETSC_COMM_SELF,"\n");
}
exit(14);
}
}
ierr = PetscFree(elemMats);CHKERRQ(ierr);
ierr = PetscLogEventEnd(events[6],0,0,0,0);CHKERRQ(ierr);
if (ctx->gpu_assembly) {
// transition to use of maps for VecGetClosure
cerr = cudaFree(SData_d->IPf);CHKERRCUDA(cerr);
SData_d->IPf = NULL;
if (!(a_IPf || a_xarray)) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "transition without Jacobian");
}
}
PetscFunctionReturn(0);
}
|
c911d16395e7adb03c3344585ef523f450fed012.hip | // !!! This is a file automatically generated by hipify!!!
/**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M 512
#define N 512
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
#define STR_SIZE 256
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
typedef double DT;
void init_arrays(DATA_TYPE* data)
{
int i, j;
for (i=0; i < (M+1); i++)
{
for (j=0; j< (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
}
}
}
void print(DATA_TYPE* symmat_outputFromGpu){
FILE* fp;
fp=fopen("out.txt","w");
char str[STR_SIZE];
if(!fp)
{
printf("Error writing!");
return;
}
//sprintf(str,"%d",NI);
//fputs(str,fp);
int i,j;
for (i = 1 ; i < (M+1) ; ++i)
{
for (j = 1 ; j < (N+1) ; ++j)
{
sprintf(str,"%f\t",symmat_outputFromGpu[i*(N+1) + j]);
fputs(str,fp);
}
sprintf(str,"\n");
fputs(str,fp);
}
fclose(fp);
}
void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat)
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 1; j < (M+1); j++)
{
stddev[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 1; j1 < M; j1++)
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = j1+1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]);
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
symmat[M*(M+1) + M] = 1.0;
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]);
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
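// One thread per column j: sum column j of data and divide by FLOAT_N to form mean[j]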
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i=1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
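// One thread per column j: accumulate squared deviations from mean[j], normalize by FLOAT_N, take the square root, and clamp near-zero values to 1.0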
__global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
std[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
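// One thread per element (i,j): center by mean[j] and scale by sqrt(FLOAT_N)*std[j]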
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
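// One thread per row j1: compute row j1 of the upper triangle of the correlation matrix and mirror it into the lower triangle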
__global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data, DT *f)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < M))
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = (j1 + 1); j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for(i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
void correlationCuda(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat,
DATA_TYPE* symmat_outputFromGpu, DT* f)
{
double t_start, t_end;
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
DT *F_gpu;
hipMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
hipMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
hipMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * (M+1));
hipMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1));
hipMalloc((void **)&F_gpu, sizeof(DT) *2);
hipMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyHostToDevice);
hipMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyHostToDevice);
hipMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * (M+1), hipMemcpyHostToDevice);
hipMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), hipMemcpyHostToDevice);
hipMemcpy(F_gpu, f, sizeof(DT) *2, hipMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
dim3 grid1((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
dim3 grid2((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
dim3 grid3((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)(ceil((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
dim3 grid4((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
t_start = rtclock();
hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1) , 0, 0, mean_gpu,data_gpu, F_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( std_kernel), dim3(grid2), dim3(block2) , 0, 0, mean_gpu,stddev_gpu,data_gpu, F_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_kernel), dim3(grid3), dim3(block3) , 0, 0, mean_gpu,stddev_gpu,data_gpu, F_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( corr_kernel), dim3(grid4), dim3(block4) , 0, 0, symmat_gpu,data_gpu, F_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
hipMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), hipMemcpyDeviceToHost);
hipMemcpy(f, F_gpu, sizeof(DT) *2, hipMemcpyDeviceToHost);
hipFree(data_gpu);
hipFree(symmat_gpu);
hipFree(stddev_gpu);
hipFree(mean_gpu);
}
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* mean;
DATA_TYPE* stddev;
DATA_TYPE* symmat;
DATA_TYPE* symmat_outputFromGpu;
DT* f;
f = (DT*)malloc(2*sizeof(DT));
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
init_arrays(data);
GPU_argv_init();
correlationCuda(data, mean, stddev, symmat, symmat_outputFromGpu, f);
print(symmat_outputFromGpu);
t_start = rtclock();
correlation(data, mean, stddev, symmat);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
FILE* fp_reverse;
fp_reverse=fopen("reverse.txt","a");
//fprintf(fp_reverse,"%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
if(*(int *)&(f[0])<*(int *)&(f[1]))
fprintf(fp_reverse,"%d\n",1);
else fprintf(fp_reverse,"%d\n",0);
fclose(fp_reverse);
compareResults(symmat, symmat_outputFromGpu);
free(data);
free(mean);
free(stddev);
free(symmat);
free(symmat_outputFromGpu);
return 0;
}
| c911d16395e7adb03c3344585ef523f450fed012.cu | /**
* correlation.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M 512
#define N 512
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 256
#define DIM_THREAD_BLOCK_KERNEL_2_Y 1
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 32
#define DIM_THREAD_BLOCK_KERNEL_3_Y 8
/* Thread block dimensions for kernel 4*/
#define DIM_THREAD_BLOCK_KERNEL_4_X 256
#define DIM_THREAD_BLOCK_KERNEL_4_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01f
#define EPS 0.005f
#define STR_SIZE 256
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
typedef double DT;
void init_arrays(DATA_TYPE* data)
{
int i, j;
for (i=0; i < (M+1); i++)
{
for (j=0; j< (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j)/ (M+1);
}
}
}
void print(DATA_TYPE* symmat_outputFromGpu){
FILE* fp;
fp=fopen("out.txt","w");
char str[STR_SIZE];
if(!fp)
{
printf("Error writing!");
return;
}
//sprintf(str,"%d",NI);
//fputs(str,fp);
int i,j;
for (i = 1 ; i < (M+1) ; ++i)
{
for (j = 1 ; j < (N+1) ; ++j)
{
sprintf(str,"%f\t",symmat_outputFromGpu[i*(N+1) + j]);
fputs(str,fp);
}
sprintf(str,"\n");
fputs(str,fp);
}
fclose(fp);
}
void correlation(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat)
{
int i, j, j1, j2;
// Determine mean of column vectors of input data matrix
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
// Determine standard deviations of column vectors of data matrix.
for (j = 1; j < (M+1); j++)
{
stddev[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
stddev[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
stddev[j] /= FLOAT_N;
stddev[j] = sqrt_of_array_cell(stddev, j);
stddev[j] = stddev[j] <= EPS ? 1.0 : stddev[j];
}
// Center and reduce the column vectors.
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N)*stddev[j]) ;
}
}
// Calculate the m * m correlation matrix.
for (j1 = 1; j1 < M; j1++)
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = j1+1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += (data[i*(M+1) + j1] * data[i*(M+1) + j2]);
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
symmat[M*(M+1) + M] = 1.0;
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("i: %d j: %d\n1: %f 2: %f\n", i, j, symmat[i*N + j], symmat_outputFromGpu[i*N + j]);
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
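// One thread per column j: sum column j of data and divide by FLOAT_N to form mean[j]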
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i=1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
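// One thread per column j: accumulate squared deviations from mean[j], normalize by FLOAT_N, take the square root, and clamp near-zero values to 1.0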
__global__ void std_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
std[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
std[j] += (data[i*(M+1) + j] - mean[j]) * (data[i*(M+1) + j] - mean[j]);
}
std[j] /= (FLOAT_N);
std[j] = sqrt(std[j]);
if(std[j] <= EPS)
{
std[j] = 1.0;
}
}
}
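// One thread per element (i,j): center by mean[j] and scale by sqrt(FLOAT_N)*std[j]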
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *std, DATA_TYPE *data, DT *f)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i*(M+1) + j] -= mean[j];
data[i*(M+1) + j] /= (sqrt(FLOAT_N) * std[j]);
}
}
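// One thread per row j1: compute row j1 of the upper triangle of the correlation matrix and mirror it into the lower triangle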
__global__ void corr_kernel(DATA_TYPE *symmat, DATA_TYPE *data, DT *f)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < M))
{
symmat[j1*(M+1) + j1] = 1.0;
for (j2 = (j1 + 1); j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for(i = 1; i < (N+1); i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
void correlationCuda(DATA_TYPE* data, DATA_TYPE* mean, DATA_TYPE* stddev, DATA_TYPE* symmat,
DATA_TYPE* symmat_outputFromGpu, DT* f)
{
double t_start, t_end;
DATA_TYPE *data_gpu;
DATA_TYPE *stddev_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
DT *F_gpu;
cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
cudaMalloc((void **)&stddev_gpu, sizeof(DATA_TYPE) * (M+1));
cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1));
cudaMalloc((void **)&F_gpu, sizeof(DT) *2);
cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice);
cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice);
cudaMemcpy(stddev_gpu, stddev, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice);
cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice);
cudaMemcpy(F_gpu, f, sizeof(DT) *2, cudaMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
dim3 grid1((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_1_X)), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
dim3 grid2((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_2_X)), 1);
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
dim3 grid3((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_X)), (size_t)(ceil((float)(N)) / ((float)DIM_THREAD_BLOCK_KERNEL_3_Y)));
dim3 block4(DIM_THREAD_BLOCK_KERNEL_4_X, DIM_THREAD_BLOCK_KERNEL_4_Y);
dim3 grid4((size_t)(ceil((float)(M)) / ((float)DIM_THREAD_BLOCK_KERNEL_4_X)), 1);
t_start = rtclock();
mean_kernel<<< grid1, block1 >>>(mean_gpu,data_gpu, F_gpu);
cudaThreadSynchronize();
std_kernel<<< grid2, block2 >>>(mean_gpu,stddev_gpu,data_gpu, F_gpu);
cudaThreadSynchronize();
reduce_kernel<<< grid3, block3 >>>(mean_gpu,stddev_gpu,data_gpu, F_gpu);
cudaThreadSynchronize();
corr_kernel<<< grid4, block4 >>>(symmat_gpu,data_gpu, F_gpu);
cudaThreadSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
DATA_TYPE valueAtSymmatIndexMTimesMPlus1PlusMPoint = 1.0;
cudaMemcpy(&(symmat_gpu[(M)*(M+1) + (M)]), &valueAtSymmatIndexMTimesMPlus1PlusMPoint, sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyDeviceToHost);
cudaMemcpy(f, F_gpu, sizeof(DT) *2, cudaMemcpyDeviceToHost);
cudaFree(data_gpu);
cudaFree(symmat_gpu);
cudaFree(stddev_gpu);
cudaFree(mean_gpu);
}
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* mean;
DATA_TYPE* stddev;
DATA_TYPE* symmat;
DATA_TYPE* symmat_outputFromGpu;
DT* f;
f = (DT*)malloc(2*sizeof(DT));
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
stddev = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
init_arrays(data);
GPU_argv_init();
correlationCuda(data, mean, stddev, symmat, symmat_outputFromGpu, f);
print(symmat_outputFromGpu);
t_start = rtclock();
correlation(data, mean, stddev, symmat);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
printf("%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
FILE* fp_reverse;
fp_reverse=fopen("reverse.txt","a");
//fprintf(fp_reverse,"%x %x\n",*(int *)&(f[0]),*(int *)&(f[1]));
if(*(int *)&(f[0])<*(int *)&(f[1]))
fprintf(fp_reverse,"%d\n",1);
else fprintf(fp_reverse,"%d\n",0);
fclose(fp_reverse);
compareResults(symmat, symmat_outputFromGpu);
free(data);
free(mean);
free(stddev);
free(symmat);
free(symmat_outputFromGpu);
return 0;
}
|
cb383db4543777f0d214440451f8053b90af916a.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef SUPPORTING_CU
#define SUPPORTING_CU
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <chrono>
#include "all_structure_undir.cuh"
#include "gpuFunctions_undir.cuh"
#include "bfs.hip"
using namespace std;
using namespace std::chrono;
void transfer_data_to_GPU(vector<ColWtList>& AdjList, int*& AdjListTracker, vector<ColWt>& AdjListFull, ColWt*& AdjListFull_device,
int nodes, int edges, int totalInsertion, int*& AdjListTracker_device, bool zeroInsFlag,
vector<changeEdge>& allChange_Ins, changeEdge*& allChange_Ins_device, int totalChangeEdges_Ins,
int deviceId, int totalChangeEdges_Del, bool zeroDelFlag, changeEdge*& allChange_Del_device,
int*& counter_del, int*& affectedNodeList_del, int*& updatedAffectedNodeList_del, int*& updated_counter_del, vector<changeEdge>& allChange_Del, size_t numberOfBlocks)
{
hipError_t cudaStatus;
//create 1D array from 2D to fit it in GPU
//cout << "creating 1D array from 2D to fit it in GPU" << endl;
AdjListTracker[0] = 0; //start pointer points to the first index of InEdgesList
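// Build a CSR-style layout: AdjListTracker[i] .. AdjListTracker[i+1]-1 index node i's neighbors inside the flattened AdjListFull array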
for (int i = 0; i < nodes; i++) {
AdjListTracker[i + 1] = AdjListTracker[i] + AdjList.at(i).size();
AdjListFull.insert(std::end(AdjListFull), std::begin(AdjList.at(i)), std::end(AdjList.at(i)));
}
//cout << "creating 1D array from 2D completed" << endl;
//Transferring input graph and change edges data to GPU
//cout << "Transferring graph data from CPU to GPU" << endl;
auto startTime_transfer = high_resolution_clock::now();
//printf("edges: %d totalInsertion:%d sizeof(ColWt):%d \n", edges, totalInsertion, sizeof(ColWt));
cudaStatus = hipMallocManaged(&AdjListFull_device, (2 * (edges + totalInsertion)) * sizeof(ColWt));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at InEdgesListFull structure");
}
//printf("testA1");
std::copy(AdjListFull.begin(), AdjListFull.end(), AdjListFull_device);
//printf("testA2");
cudaStatus = hipMalloc((void**)&AdjListTracker_device, (nodes + 1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at InEdgesListTracker_device");
}
hipMemcpy(AdjListTracker_device, AdjListTracker, (nodes + 1) * sizeof(int), hipMemcpyHostToDevice);
//printf("testB");
//Asynchronous prefetching of data
hipMemPrefetchAsync(AdjListFull_device, edges * sizeof(ColWt), deviceId);
//printf("testC");
if (zeroInsFlag != true) {
cudaStatus = hipMallocManaged(&allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at allChange_Ins structure");
}
std::copy(allChange_Ins.begin(), allChange_Ins.end(), allChange_Ins_device);
//printf("testD");
//set hipMemAdviseSetReadMostly by the GPU for change edge data
hipMemAdvise(allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge), hipMemAdviseSetReadMostly, deviceId);
//printf("testE");
//Asynchronous prefetching of data
hipMemPrefetchAsync(allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge), deviceId);
//printf("testF");
}
if (zeroDelFlag != true) {
cudaStatus = hipMallocManaged(&allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at allChange_Del structure");
}
std::copy(allChange_Del.begin(), allChange_Del.end(), allChange_Del_device);
//set hipMemAdviseSetReadMostly by the GPU for change edge data
hipMemAdvise(allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge), hipMemAdviseSetReadMostly, deviceId);
//Asynchronous prefetching of data
hipMemPrefetchAsync(allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge), deviceId);
counter_del = 0;
hipMallocManaged(&counter_del, sizeof(int));
hipMallocManaged(&affectedNodeList_del, nodes * sizeof(int));
hipMallocManaged(&updatedAffectedNodeList_del, nodes * sizeof(int));
updated_counter_del = 0;
hipMallocManaged(&updated_counter_del, sizeof(int));
//modify adjacency list to adapt the deleted edges
deleteEdgeFromAdj << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Del_device, totalChangeEdges_Del, AdjListFull_device, AdjListTracker_device);
hipDeviceSynchronize();
}
auto stopTime_transfer = high_resolution_clock::now();//Time calculation ends
auto duration_transfer = duration_cast<microseconds>(stopTime_transfer - startTime_transfer);// duration calculation
//cout << "**Time taken to transfer graph data from CPU to GPU: "<< float(duration_transfer.count()) / 1000 << " milliseconds**" << endl;
}
void read_and_transfer_input_SSSPtree_to_GPU(char* inputSSSPfile, vector<ColList>& SSSPTreeAdjList, int*& SSSPTreeAdjListTracker, vector<int>& SSSPTreeAdjListFull,
RT_Vertex*& SSSP, int nodes, int edges, int*& SSSPTreeAdjListFull_device, int*& SSSPTreeAdjListTracker_device, vector<int>& hop, int deviceId, int*& d_hop)
{
hipError_t cudaStatus;
SSSPTreeAdjList.resize(nodes);
SSSPTreeAdjListTracker = (int*)malloc((nodes + 1) * sizeof(int));//we take nodes +1 to store the start ptr of the first row
cudaStatus = hipMallocManaged(&SSSP, nodes * sizeof(RT_Vertex));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at SSSP structure");
}
//cout << "Reading input SSSP tree data..." << endl;
auto readSSSPstartTime = high_resolution_clock::now();//Time calculation starts
read_SSSP(SSSP, inputSSSPfile, &nodes, SSSPTreeAdjList);
//New addition
SSSPTreeAdjListTracker[0] = 0; //start pointer points to the first index of InEdgesList
for (int i = 0; i < nodes; i++) {
SSSPTreeAdjListTracker[i + 1] = SSSPTreeAdjListTracker[i] + SSSPTreeAdjList.at(i).size();
SSSPTreeAdjListFull.insert(std::end(SSSPTreeAdjListFull), std::begin(SSSPTreeAdjList.at(i)), std::end(SSSPTreeAdjList.at(i)));
}
//Transferring SSSP tree data to GPU
cudaStatus = hipMallocManaged(&SSSPTreeAdjListFull_device, (nodes) * sizeof(int)); //1/7/2020:new change to nodes from nodes -1 as 0 0 0 is also a row in SSSP file//SSSP tree has n-1 edges and we consider each edge 1 time
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at SSSPTreeAdjListFull_device structure");
}
std::copy(SSSPTreeAdjListFull.begin(), SSSPTreeAdjListFull.end(), SSSPTreeAdjListFull_device);
cudaStatus = hipMalloc((void**)&SSSPTreeAdjListTracker_device, (nodes + 1) * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed at SSSPTreeAdjListTracker_device");
}
hipMemcpy(SSSPTreeAdjListTracker_device, SSSPTreeAdjListTracker, (nodes + 1) * sizeof(int), hipMemcpyHostToDevice);
//compute hop
vector<bool> visited;
int startVertex = 0; //0 is considered as root vertex
visited = vector<bool>(nodes);
//hop = vector<int>(nodes);
const int size = nodes * sizeof(int);
hipMalloc((void**)&d_hop, size);
hop = vector<int>(nodes, INT_MAX);
hop[startVertex] = 0;
hipMemcpy(d_hop, hop.data(), size, hipMemcpyHostToDevice);
//??we don't need this hop computing now
auto startTime = chrono::steady_clock::now();
bfsGPU(startVertex, nodes, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, d_hop, visited);
auto endTime = std::chrono::steady_clock::now();
long duration = chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count();
//printf("Elapsed time for hop computation : %li ms.\n", duration);
//hipDeviceSynchronize();
hipMemcpy(&hop[0], d_hop, size, hipMemcpyDeviceToHost);
//printHop(hop);
auto readSSSPstopTime = high_resolution_clock::now();//Time calculation ends
auto readSSSPduration = duration_cast<microseconds>(readSSSPstopTime - readSSSPstartTime);// duration calculation
//cout << "Reading input SSSP tree data completed" << endl;
//cout << "Time taken to read input input SSSP tree: " << readSSSPduration.count() << " microseconds" << endl;
//set hipMemAdviseSetPreferredLocation at GPU for SSSP data
hipMemAdvise(SSSP, nodes * sizeof(RT_Vertex), hipMemAdviseSetPreferredLocation, deviceId);
//Asynchronous prefetching of data
hipMemPrefetchAsync(SSSP, nodes * sizeof(RT_Vertex), deviceId);
}
#endif | cb383db4543777f0d214440451f8053b90af916a.cu | #ifndef SUPPORTING_CU
#define SUPPORTING_CU
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <chrono>
#include "all_structure_undir.cuh"
#include "gpuFunctions_undir.cuh"
#include "bfs.cu"
using namespace std;
using namespace std::chrono;
void transfer_data_to_GPU(vector<ColWtList>& AdjList, int*& AdjListTracker, vector<ColWt>& AdjListFull, ColWt*& AdjListFull_device,
int nodes, int edges, int totalInsertion, int*& AdjListTracker_device, bool zeroInsFlag,
vector<changeEdge>& allChange_Ins, changeEdge*& allChange_Ins_device, int totalChangeEdges_Ins,
int deviceId, int totalChangeEdges_Del, bool zeroDelFlag, changeEdge*& allChange_Del_device,
int*& counter_del, int*& affectedNodeList_del, int*& updatedAffectedNodeList_del, int*& updated_counter_del, vector<changeEdge>& allChange_Del, size_t numberOfBlocks)
{
cudaError_t cudaStatus;
//create 1D array from 2D to fit it in GPU
//cout << "creating 1D array from 2D to fit it in GPU" << endl;
AdjListTracker[0] = 0; //start pointer points to the first index of InEdgesList
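// Build a CSR-style layout: AdjListTracker[i] .. AdjListTracker[i+1]-1 index node i's neighbors inside the flattened AdjListFull array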
for (int i = 0; i < nodes; i++) {
AdjListTracker[i + 1] = AdjListTracker[i] + AdjList.at(i).size();
AdjListFull.insert(std::end(AdjListFull), std::begin(AdjList.at(i)), std::end(AdjList.at(i)));
}
//cout << "creating 1D array from 2D completed" << endl;
//Transferring input graph and change edges data to GPU
//cout << "Transferring graph data from CPU to GPU" << endl;
auto startTime_transfer = high_resolution_clock::now();
//printf("edges: %d totalInsertion:%d sizeof(ColWt):%d \n", edges, totalInsertion, sizeof(ColWt));
cudaStatus = cudaMallocManaged(&AdjListFull_device, (2 * (edges + totalInsertion)) * sizeof(ColWt));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at InEdgesListFull structure");
}
//printf("testA1");
std::copy(AdjListFull.begin(), AdjListFull.end(), AdjListFull_device);
//printf("testA2");
cudaStatus = cudaMalloc((void**)&AdjListTracker_device, (nodes + 1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at InEdgesListTracker_device");
}
cudaMemcpy(AdjListTracker_device, AdjListTracker, (nodes + 1) * sizeof(int), cudaMemcpyHostToDevice);
//printf("testB");
//Asynchronous prefetching of data
cudaMemPrefetchAsync(AdjListFull_device, edges * sizeof(ColWt), deviceId);
//printf("testC");
if (zeroInsFlag != true) {
cudaStatus = cudaMallocManaged(&allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at allChange_Ins structure");
}
std::copy(allChange_Ins.begin(), allChange_Ins.end(), allChange_Ins_device);
//printf("testD");
//set cudaMemAdviseSetReadMostly by the GPU for change edge data
cudaMemAdvise(allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge), cudaMemAdviseSetReadMostly, deviceId);
//printf("testE");
//Asynchronous prefetching of data
cudaMemPrefetchAsync(allChange_Ins_device, totalChangeEdges_Ins * sizeof(changeEdge), deviceId);
//printf("testF");
}
if (zeroDelFlag != true) {
cudaStatus = cudaMallocManaged(&allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at allChange_Del structure");
}
std::copy(allChange_Del.begin(), allChange_Del.end(), allChange_Del_device);
//set cudaMemAdviseSetReadMostly by the GPU for change edge data
cudaMemAdvise(allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge), cudaMemAdviseSetReadMostly, deviceId);
//Asynchronous prefetching of data
cudaMemPrefetchAsync(allChange_Del_device, totalChangeEdges_Del * sizeof(changeEdge), deviceId);
counter_del = 0;
cudaMallocManaged(&counter_del, sizeof(int));
cudaMallocManaged(&affectedNodeList_del, nodes * sizeof(int));
cudaMallocManaged(&updatedAffectedNodeList_del, nodes * sizeof(int));
updated_counter_del = 0;
cudaMallocManaged(&updated_counter_del, sizeof(int));
//modify adjacency list to adapt the deleted edges
deleteEdgeFromAdj << < numberOfBlocks, THREADS_PER_BLOCK >> > (allChange_Del_device, totalChangeEdges_Del, AdjListFull_device, AdjListTracker_device);
cudaDeviceSynchronize();
}
auto stopTime_transfer = high_resolution_clock::now();//Time calculation ends
auto duration_transfer = duration_cast<microseconds>(stopTime_transfer - startTime_transfer);// duration calculation
//cout << "**Time taken to transfer graph data from CPU to GPU: "<< float(duration_transfer.count()) / 1000 << " milliseconds**" << endl;
}
void read_and_transfer_input_SSSPtree_to_GPU(char* inputSSSPfile, vector<ColList>& SSSPTreeAdjList, int*& SSSPTreeAdjListTracker, vector<int>& SSSPTreeAdjListFull,
RT_Vertex*& SSSP, int nodes, int edges, int*& SSSPTreeAdjListFull_device, int*& SSSPTreeAdjListTracker_device, vector<int>& hop, int deviceId, int*& d_hop)
{
cudaError_t cudaStatus;
SSSPTreeAdjList.resize(nodes);
SSSPTreeAdjListTracker = (int*)malloc((nodes + 1) * sizeof(int));//we take nodes +1 to store the start ptr of the first row
cudaStatus = cudaMallocManaged(&SSSP, nodes * sizeof(RT_Vertex));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at SSSP structure");
}
//cout << "Reading input SSSP tree data..." << endl;
auto readSSSPstartTime = high_resolution_clock::now();//Time calculation starts
read_SSSP(SSSP, inputSSSPfile, &nodes, SSSPTreeAdjList);
//New addition
SSSPTreeAdjListTracker[0] = 0; //start pointer points to the first index of InEdgesList
for (int i = 0; i < nodes; i++) {
SSSPTreeAdjListTracker[i + 1] = SSSPTreeAdjListTracker[i] + SSSPTreeAdjList.at(i).size();
SSSPTreeAdjListFull.insert(std::end(SSSPTreeAdjListFull), std::begin(SSSPTreeAdjList.at(i)), std::end(SSSPTreeAdjList.at(i)));
}
//Transferring SSSP tree data to GPU
cudaStatus = cudaMallocManaged(&SSSPTreeAdjListFull_device, (nodes) * sizeof(int)); //1/7/2020:new change to nodes from nodes -1 as 0 0 0 is also a row in SSSP file//SSSP tree has n-1 edges and we consider each edge 1 time
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at SSSPTreeAdjListFull_device structure");
}
std::copy(SSSPTreeAdjListFull.begin(), SSSPTreeAdjListFull.end(), SSSPTreeAdjListFull_device);
cudaStatus = cudaMalloc((void**)&SSSPTreeAdjListTracker_device, (nodes + 1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed at SSSPTreeAdjListTracker_device");
}
cudaMemcpy(SSSPTreeAdjListTracker_device, SSSPTreeAdjListTracker, (nodes + 1) * sizeof(int), cudaMemcpyHostToDevice);
//compute hop
vector<bool> visited;
int startVertex = 0; //0 is considered as root vertex
visited = vector<bool>(nodes);
//hop = vector<int>(nodes);
const int size = nodes * sizeof(int);
cudaMalloc((void**)&d_hop, size);
hop = vector<int>(nodes, INT_MAX);
hop[startVertex] = 0;
cudaMemcpy(d_hop, hop.data(), size, cudaMemcpyHostToDevice);
//??we don't need this hop computing now
auto startTime = chrono::steady_clock::now();
bfsGPU(startVertex, nodes, SSSPTreeAdjListFull_device, SSSPTreeAdjListTracker_device, d_hop, visited);
auto endTime = std::chrono::steady_clock::now();
long duration = chrono::duration_cast<chrono::milliseconds>(endTime - startTime).count();
//printf("Elapsed time for hop computation : %li ms.\n", duration);
//cudaDeviceSynchronize();
cudaMemcpy(&hop[0], d_hop, size, cudaMemcpyDeviceToHost);
//printHop(hop);
auto readSSSPstopTime = high_resolution_clock::now();//Time calculation ends
auto readSSSPduration = duration_cast<microseconds>(readSSSPstopTime - readSSSPstartTime);// duration calculation
//cout << "Reading input SSSP tree data completed" << endl;
//cout << "Time taken to read input input SSSP tree: " << readSSSPduration.count() << " microseconds" << endl;
//set cudaMemAdviseSetPreferredLocation at GPU for SSSP data
cudaMemAdvise(SSSP, nodes * sizeof(RT_Vertex), cudaMemAdviseSetPreferredLocation, deviceId);
//Asynchronous prefetching of data
cudaMemPrefetchAsync(SSSP, nodes * sizeof(RT_Vertex), deviceId);
}
#endif |
7cbbbe44269c60d7d463dce4b2ae9887753fab59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************
1 - Install nvidia-cuda-toolkit
2 - Compile this program using:
nvcc add.cu -o add_cuda.out
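3 - For this hipified copy, hipcc can be used in the same way (e.g. hipcc <this file> -o add_hip.out; the output name is only an example)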
*******************************/
/*
Program that runs block size dynamically. As the number of threads increase,
the number of blocks is determined as a function of threads and input size.
This provides a constant optimal performance even though the number of threads
change
*/
#include <iostream>
#include <math.h>
#include <ctime>
#include <cstdio>
//CUDA kernel to add elements of the matrix
// __global__ converts a function into a CUDA kernel
__global__
void add(int n, float *x, float *y)
{
// global index of this thread across the entire grid
int index = blockIdx.x * blockDim.x + threadIdx.x;
// total number of threads in the grid (step size of the grid-stride loop)
int stride = blockDim.x * gridDim.x;
// grid-stride loop: each thread handles elements index, index+stride, index+2*stride, ...
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
for(int t = 32; t <= 1024; t+=32)
{
int N = 1<<24; // 2^24 elements
// Unified (managed) memory allocation is done with hipMallocManaged( , )
float *x; float *y;
hipMallocManaged( &x, N*sizeof(float) );
hipMallocManaged( &y, N*sizeof(float) );
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
std::clock_t start = clock();
// Launch the 'add' kernel, which invokes it in the GPU
int blockSize = t;
int numBlocks = (N + blockSize - 1) / blockSize;
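// ceiling division; e.g. N = 1<<24 (16777216) with blockSize = 256 gives numBlocks = 65536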
std::cout << "BlockSize = " << t << ",NumBlocks = " << numBlocks << "\n";
hipLaunchKernelGGL(( add), dim3(numBlocks),dim3(blockSize), 0, 0, N, x, y);
// Wait for the GPU to synchronize before accessign through host(CPU)
hipDeviceSynchronize();
std::clock_t stop = clock();
int duration = 1000 * (stop - start) / (double)CLOCKS_PER_SEC;
//std::cout << "Running time using " << t << " threads = " << duration << "\n";
std::cout << duration << "\n";
// Check for errors (all values should be 3.0f)
/*float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
*/
// Deallocating memory using hipFree()
hipFree(x);
hipFree(y);
}
return 0;
}
| 7cbbbe44269c60d7d463dce4b2ae9887753fab59.cu |
/*******************************
1 - Install nvidia-cuda-toolkit
2 - Compile this program using:
nvcc add.cu -o add_cuda.out
*******************************/
/*
Program that runs block size dynamically. As the number of threads increase,
the number of blocks is determined as a function of threads and input size.
This provides a constant optimal performance even though the number of threads
change
*/
#include <iostream>
#include <math.h>
#include <ctime>
#include <cstdio>
//CUDA kernel to add elements of the matrix
// __global__ converts a function into a CUDA kernel
__global__
void add(int n, float *x, float *y)
{
// global index of this thread across the entire grid
int index = blockIdx.x * blockDim.x + threadIdx.x;
// total number of threads in the grid (step size of the grid-stride loop)
int stride = blockDim.x * gridDim.x;
// grid-stride loop: each thread handles elements index, index+stride, index+2*stride, ...
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
for(int t = 32; t <= 1024; t+=32)
{
int N = 1<<24; // 2^24 elements
// Memory allocation in CUDA is done with cudaMallocManaged( , )
float *x; float *y;
cudaMallocManaged( &x, N*sizeof(float) );
cudaMallocManaged( &y, N*sizeof(float) );
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
std::clock_t start = clock();
// Launch the 'add' kernel, which invokes it in the GPU
int blockSize = t;
int numBlocks = (N + blockSize - 1) / blockSize;
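// ceiling division; e.g. N = 1<<24 (16777216) with blockSize = 256 gives numBlocks = 65536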
std::cout << "BlockSize = " << t << ",NumBlocks = " << numBlocks << "\n";
add<<<numBlocks,blockSize>>>(N, x, y);
// Wait for the GPU to synchronize before accessign through host(CPU)
cudaDeviceSynchronize();
std::clock_t stop = clock();
int duration = 1000 * (stop - start) / (double)CLOCKS_PER_SEC;
//std::cout << "Running time using " << t << " threads = " << duration << "\n";
std::cout << duration << "\n";
// Check for errors (all values should be 3.0f)
/*float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
*/
// Deallocating memory using cudaFree()
cudaFree(x);
cudaFree(y);
}
return 0;
}
|
4c0fe62268433ed8417464e907ba12f98a60c580.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_sinf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinf(x[id]);
}
} | 4c0fe62268433ed8417464e907ba12f98a60c580.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_sinf (size_t n, float *result, float *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinf(x[id]);
}
} |
a2cb5f45089db581ce5de51d2ffbf40c7878cd4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
template<bool IsFullPass>
struct TSixBitPairwiseHistUnrollTrait {
static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 20;//IsFullPass ? 8 : 16;
#endif
}
static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return IsFullPass ? 4 : 8;
#else
return 1;
#endif
}
};
template<int BlockSize, bool NeedLastBinMask /*is 32 histogram */>
struct TSixBitHistogram {
float* Histogram;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 2048 * (threadIdx.x / 32);
//2 blocks if INNER_HIST_BITS_COUNT = 0, else 1
// x4 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
__forceinline__ __device__ TSixBitHistogram(float* buff) {
Histogram = buff;
for (int i = threadIdx.x; i < BlockSize * 64; i += BlockSize) {
Histogram[i] = 0;
}
Histogram += SliceOffset();
__syncthreads();
}
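// ci1/ci2 are compressed indices packing four 8-bit feature bins each; for every feature the pair weight w is added to the shared-memory histogram cell selected by the two bins, the pair ordering (flag) and the (bin1 >= bin2) comparison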
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = 4 * (threadIdx.x & 6);
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, shift);
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + 2 * i) & 6;
int bin1 = (bins1 >> (24 - 8 * i)) & 255;
int bin2 = (bins2 >> (24 - 8 * i)) & 255;
const float w1 = (!NeedLastBinMask || bin1 < 64) ? w : 0;
const float w2 = (!NeedLastBinMask || bin2 < 64) ? w : 0;
const int tmp = ((bin1 >= bin2) == flag ? 0 : 8) + f;
int offset1 = tmp + ((bin1 & 63) << 5) + flag;
int offset2 = tmp + ((bin2 & 63) << 5) + !flag;
groupTile.sync();
if (groupTile.thread_rank() < 8) {
Histogram[offset1] += w1;
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
Histogram[offset1] += w1;
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
Histogram[offset2] += w2;
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
Histogram[offset2] += w2;
}
}
}
//
#if __CUDA_ARCH__ < 700
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = 4 * (threadIdx.x & 6);
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], shift);
}
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + 2 * i) & 6;
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N;++k) {
bin1[k] = (bins1[k] >> (24 - 8 * i)) & 255;
bin2[k] = (bins2[k] >> (24 - 8 * i)) & 255;
}
float w1[N];
float w2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
w1[k] = (!NeedLastBinMask || bin1[k] < 64) ? w[k] : 0;
w2[k] = (!NeedLastBinMask || bin2[k] < 64) ? w[k] : 0;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = ((bin1[k] >= bin2[k]) == flag ? 0 : 8) + f;
offset1[k] = tmp + ((bin1[k] & 63) * 32) + flag;
offset2[k] = tmp + ((bin2[k] & 63) * 32) + !flag;
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset1[k]] += w1[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset1[k]] += w1[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset2[k]] += w2[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset2[k]] += w2[k];
}
}
}
}
#endif
__forceinline__ __device__ void Reduce() {
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 2 * 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
#pragma unroll
for (int i = start; i < 64 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[start] = sum;
}
}
__syncthreads();
}
};
template<int BlockSize, bool IsFullPass, int M>
#if __CUDA_ARCH__ <= 350
__launch_bounds__(BlockSize, 1)
#else
__launch_bounds__(BlockSize)
#endif
__global__ void ComputeSplitPropertiesNonBinaryPairs6Bit(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int featureOffset = (blockIdx.x / M) * 4;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 4);
__shared__ float localHist[64 * BlockSize];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &localHist[0]);
if (maxBinCount > 64 || (maxBinCount <= 32)) {
return;
}
__syncthreads();
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
constexpr int histBlockCount = 1;
constexpr int innerUnroll = TSixBitPairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
constexpr int outerUnroll = TSixBitPairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
#define DECLARE_PASS(NEED_MASK) \
{ \
using THist = TSixBitHistogram<BlockSize, NEED_MASK>;\
ComputePairHistogram< BlockSize, histBlockCount, innerUnroll, outerUnroll, M, THist>(partition->Offset, cindex, partition->Size, pairs, weight, &localHist[0]);\
}
if (maxBinCount < 64) {
DECLARE_PASS(false);
} else {
DECLARE_PASS(true);
}
#undef DECLARE_PASS
if (threadIdx.x < 128) {
const int histId = threadIdx.x & 3;
const int binId = (threadIdx.x >> 2) & 7;
const int fid = (threadIdx.x >> 5) & 3;
if (fid < fCount) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
histogram += 4 * bfStart;
const int leqOrGeqShift = histId < 2 ? 0 : 8;
const int isSecond = (histId & 1);
for (int fold = binId; fold < feature[fid].Folds; fold += 8) {
const int readOffset = 32 * fold
+ 2 * fid
+ isSecond
+ leqOrGeqShift;
const float result = localHist[readOffset] + localHist[readOffset + 16];
if (M > 1) {
atomicAdd(histogram + 4 * fold + histId, result);
} else {
histogram[4 * fold + histId] += result;
}
}
}
}
}
void ComputePairwiseHistogramOneByte6Bits(const TCFeature* features,
const ui32 featureCount,
const ui32 sixBitsFeatureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
TCudaStream stream) {
if (sixBitsFeatureCount > 0) {
const int blockSize = 192;
dim3 numBlocks;
numBlocks.x = (sixBitsFeatureCount+ 3) / 4;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64);
numBlocks.x = (featureCount + 3) / 4;
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \
ComputeSplitPropertiesNonBinaryPairs6Bit < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
#define DISPATCH(BLOCKS_PER_FEATURE) \
if (fullPass) { \
NB_HIST(true, BLOCKS_PER_FEATURE) \
} else { \
NB_HIST(false, BLOCKS_PER_FEATURE)\
}
if (blockPerFeatureMultiplier == 1) {
DISPATCH(1);
} else if (blockPerFeatureMultiplier == 2) {
DISPATCH(2);
} else if (blockPerFeatureMultiplier == 4) {
DISPATCH(4);
} else if (blockPerFeatureMultiplier == 8) {
DISPATCH(8);
} else if (blockPerFeatureMultiplier == 16) {
DISPATCH(16);
} else if (blockPerFeatureMultiplier == 32) {
DISPATCH(32);
} else if (blockPerFeatureMultiplier == 64) {
DISPATCH(64);
} else {
exit(0);
}
#undef NB_HIST
#undef DISPATCH
}
}
}
| a2cb5f45089db581ce5de51d2ffbf40c7878cd4d.cu | #include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "compute_pair_hist_loop.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
template<bool IsFullPass>
struct TSixBitPairwiseHistUnrollTrait {
static constexpr int InnerUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return 2;
#else
return 20;//IsFullPass ? 8 : 16;
#endif
}
static constexpr int OuterUnroll() {
#if __CUDA_ARCH__ <= 350
return 4;
#elif __CUDA_ARCH__ < 700
return IsFullPass ? 4 : 8;
#else
return 1;
#endif
}
};
template<int BlockSize, bool NeedLastBinMask /*is 32 histogram */>
struct TSixBitHistogram {
float* Histogram;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 2048 * (threadIdx.x / 32);
//2 blocks if INNER_HIST_BITS_COUNT = 0, else 1
// x4 feature and x4 histograms, though histStart = blockIdx * 16
return warpOffset + (threadIdx.x & 16);
}
__forceinline__ __device__ TSixBitHistogram(float* buff) {
Histogram = buff;
for (int i = threadIdx.x; i < BlockSize * 64; i += BlockSize) {
Histogram[i] = 0;
}
Histogram += SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPair(const ui32 ci1,
const ui32 ci2,
const float w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = 4 * (threadIdx.x & 6);
const ui32 bins1 = RotateRight(flag ? ci2 : ci1, shift);
const ui32 bins2 = RotateRight(flag ? ci1 : ci2, shift);
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + 2 * i) & 6;
int bin1 = (bins1 >> (24 - 8 * i)) & 255;
int bin2 = (bins2 >> (24 - 8 * i)) & 255;
const float w1 = (!NeedLastBinMask || bin1 < 64) ? w : 0;
const float w2 = (!NeedLastBinMask || bin2 < 64) ? w : 0;
const int tmp = ((bin1 >= bin2) == flag ? 0 : 8) + f;
int offset1 = tmp + ((bin1 & 63) << 5) + flag;
int offset2 = tmp + ((bin2 & 63) << 5) + !flag;
groupTile.sync();
if (groupTile.thread_rank() < 8) {
Histogram[offset1] += w1;
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
Histogram[offset1] += w1;
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
Histogram[offset2] += w2;
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
Histogram[offset2] += w2;
}
}
}
//
#if __CUDA_ARCH__ < 700
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
#pragma unroll
for (int k = 0; k < N; ++k) {
AddPair(ci1[k], ci2[k], w[k]);
}
}
#else
template <int N>
__forceinline__ __device__ void AddPairs(const ui32* ci1,
const ui32* ci2,
const float* w) {
thread_block_tile<16> groupTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
const int shift = 4 * (threadIdx.x & 6);
ui32 bins1[N];
ui32 bins2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
bins1[k] = RotateRight(flag ? ci2[k] : ci1[k], shift);
bins2[k] = RotateRight(flag ? ci1[k] : ci2[k], shift);
}
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = (threadIdx.x + 2 * i) & 6;
int bin1[N];
int bin2[N];
#pragma unroll
for (int k = 0; k < N;++k) {
bin1[k] = (bins1[k] >> (24 - 8 * i)) & 255;
bin2[k] = (bins2[k] >> (24 - 8 * i)) & 255;
}
float w1[N];
float w2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
w1[k] = (!NeedLastBinMask || bin1[k] < 64) ? w[k] : 0;
w2[k] = (!NeedLastBinMask || bin2[k] < 64) ? w[k] : 0;
}
int offset1[N];
int offset2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
const int tmp = ((bin1[k] >= bin2[k]) == flag ? 0 : 8) + f;
offset1[k] = tmp + ((bin1[k] & 63) * 32) + flag;
offset2[k] = tmp + ((bin2[k] & 63) * 32) + !flag;
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset1[k]] += w1[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset1[k]] += w1[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() < 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset2[k]] += w2[k];
}
}
groupTile.sync();
if (groupTile.thread_rank() >= 8) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset2[k]] += w2[k];
}
}
}
}
#endif
__forceinline__ __device__ void Reduce() {
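// Each warp has accumulated into its own 2048-float slice of the shared buffer (BlockSize * 64 floats
// in total), so the loop below sums the per-warp slices element-wise into the first slice before the
// per-feature results are written out to the global histogram.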
Histogram -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 2 * 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
#pragma unroll
for (int i = start; i < 64 * BlockSize; i += warpHistSize) {
sum += Histogram[i];
}
Histogram[start] = sum;
}
}
__syncthreads();
}
};
template<int BlockSize, bool IsFullPass, int M>
#if __CUDA_ARCH__ <= 350
__launch_bounds__(BlockSize, 1)
#else
__launch_bounds__(BlockSize)
#endif
__global__ void ComputeSplitPropertiesNonBinaryPairs6Bit(const TCFeature* feature, int fCount, const ui32* cindex,
const uint2* pairs, const float* weight,
const TDataPartition* partition,
int histLineSize,
float* histogram) {
const int featureOffset = (blockIdx.x / M) * 4;
feature += featureOffset;
cindex += feature->Offset;
fCount = min(fCount - featureOffset, 4);
__shared__ float localHist[64 * BlockSize];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &localHist[0]);
if (maxBinCount > 64 || (maxBinCount <= 32)) {
return;
}
__syncthreads();
if (IsFullPass) {
partition += blockIdx.y;
histogram += blockIdx.y * histLineSize * 4ULL;
} else {
const int depth = (int)log2((float)gridDim.y);
int partId = GetPairwisePartIdToCalculate(partition);
partition += partId;
histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4ULL;
}
if (partition->Size == 0) {
return;
}
constexpr int histBlockCount = 1;
constexpr int innerUnroll = TSixBitPairwiseHistUnrollTrait<IsFullPass>::InnerUnroll();
constexpr int outerUnroll = TSixBitPairwiseHistUnrollTrait<IsFullPass>::OuterUnroll();
#define DECLARE_PASS(NEED_MASK) \
{ \
using THist = TSixBitHistogram<BlockSize, NEED_MASK>;\
ComputePairHistogram< BlockSize, histBlockCount, innerUnroll, outerUnroll, M, THist>(partition->Offset, cindex, partition->Size, pairs, weight, &localHist[0]);\
}
if (maxBinCount < 64) {
DECLARE_PASS(false);
} else {
DECLARE_PASS(true);
}
#undef DECLARE_PASS
if (threadIdx.x < 128) {
const int histId = threadIdx.x & 3;
const int binId = (threadIdx.x >> 2) & 7;
const int fid = (threadIdx.x >> 5) & 3;
if (fid < fCount) {
const ui32 bfStart = feature[fid].FirstFoldIndex;
histogram += 4 * bfStart;
const int leqOrGeqShift = histId < 2 ? 0 : 8;
const int isSecond = (histId & 1);
for (int fold = binId; fold < feature[fid].Folds; fold += 8) {
const int readOffset = 32 * fold
+ 2 * fid
+ isSecond
+ leqOrGeqShift;
const float result = localHist[readOffset] + localHist[readOffset + 16];
if (M > 1) {
atomicAdd(histogram + 4 * fold + histId, result);
} else {
histogram[4 * fold + histId] += result;
}
}
}
}
}
void ComputePairwiseHistogramOneByte6Bits(const TCFeature* features,
const ui32 featureCount,
const ui32 sixBitsFeatureCount,
const ui32* compressedIndex,
const uint2* pairs, ui32 pairCount,
const float* weight,
const TDataPartition* partition,
ui32 partCount,
ui32 histLineSize,
bool fullPass,
float* histogram,
TCudaStream stream) {
if (sixBitsFeatureCount > 0) {
const int blockSize = 192;
dim3 numBlocks;
numBlocks.x = (sixBitsFeatureCount+ 3) / 4;
numBlocks.y = fullPass ? partCount : partCount / 4;
numBlocks.z = fullPass ? 1 : 3;
const ui32 blockPerFeatureMultiplier = EstimateBlockPerFeatureMultiplier(numBlocks, pairCount, 64);
numBlocks.x = (featureCount + 3) / 4;
numBlocks.x *= blockPerFeatureMultiplier;
#define NB_HIST(IS_FULL, BLOCKS_PER_FEATURE) \
ComputeSplitPropertiesNonBinaryPairs6Bit < blockSize, IS_FULL, BLOCKS_PER_FEATURE > << <numBlocks, blockSize, 0, stream>>>(\
features, featureCount, compressedIndex, pairs,\
weight, partition, histLineSize, histogram);
#define DISPATCH(BLOCKS_PER_FEATURE) \
if (fullPass) { \
NB_HIST(true, BLOCKS_PER_FEATURE) \
} else { \
NB_HIST(false, BLOCKS_PER_FEATURE)\
}
if (blockPerFeatureMultiplier == 1) {
DISPATCH(1);
} else if (blockPerFeatureMultiplier == 2) {
DISPATCH(2);
} else if (blockPerFeatureMultiplier == 4) {
DISPATCH(4);
} else if (blockPerFeatureMultiplier == 8) {
DISPATCH(8);
} else if (blockPerFeatureMultiplier == 16) {
DISPATCH(16);
} else if (blockPerFeatureMultiplier == 32) {
DISPATCH(32);
} else if (blockPerFeatureMultiplier == 64) {
DISPATCH(64);
} else {
exit(0);
}
#undef NB_HIST
#undef DISPATCH
}
}
}
|
feb9778ea3874a21da3bacdb7c52e9695b4b082f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** @file
* @brief Definition of CudaIntFloatFilter class.
*
* @author Jan Bobek
*/
#include "edetect.hxx"
#include "IImage.hxx"
#include "cuda/CudaError.hxx"
#include "cuda/CudaIntFloatFilter.hxx"
/**
* @brief CUDA kernel converting integer-pixels to float-pixels.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
__global__ void
convertInt2FloatKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
float* const dstp =
(float*)(ddata + row * dstride) + col;
const unsigned char* const srcp =
sdata + row * sstride + col;
*dstp = *srcp / 255.0f;
}
/**
* @brief CUDA kernel converting float-pixels to integer-pixels.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
__global__ void
convertFloat2IntKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
unsigned char* const dstp =
ddata + row * dstride + col;
const float* const srcp =
(const float*)(sdata + row * sstride) + col;
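// __saturatef clamps the float value to [0, 1] before it is scaled back to the 8-bit range,
// mirroring the /255 normalization done in convertInt2FloatKernel.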
*dstp = (unsigned char)(__saturatef(*srcp) * 255.0f);
}
/*************************************************************************/
/* CudaIntFloatFilter */
/*************************************************************************/
void
CudaIntFloatFilter::convertInt2Float(
IImage& dest,
const IImage& src
)
{
const unsigned int columns =
src.columns() * Image::channels( src.format() );
// 32 = warp size, 8 * 32 = 256 threads
const dim3 threadsPerBlock(32, 8);
const dim3 numBlocks(
(columns + threadsPerBlock.x - 1) / threadsPerBlock.x,
(src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y );
hipLaunchKernelGGL(( convertInt2FloatKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0,
dest.data(), dest.stride(),
src.data(), src.stride(),
src.rows(), columns
);
cudaCheckLastError( "CudaIntFloatFilter: Int2Float kernel launch failed" );
cudaMsgCheckError( hipDeviceSynchronize(), "CudaIntFloatFilter: Int2Float kernel run failed" );
}
void
CudaIntFloatFilter::convertFloat2Int(
IImage& dest,
const IImage& src
)
{
const unsigned int columns =
src.columns() * Image::channels( src.format() );
// 32 = warp size, 8 * 32 = 256 threads
const dim3 threadsPerBlock(32, 8);
const dim3 numBlocks(
(columns + threadsPerBlock.x - 1) / threadsPerBlock.x,
(src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y );
hipLaunchKernelGGL(( convertFloat2IntKernel), dim3(numBlocks), dim3(threadsPerBlock) , 0, 0,
dest.data(), dest.stride(),
src.data(), src.stride(),
src.rows(), columns
);
cudaCheckLastError( "CudaIntFloatFilter: Float2Int kernel launch failed" );
cudaMsgCheckError( hipDeviceSynchronize(), "CudaIntFloatFilter: Float2Int kernel run failed" );
}
| feb9778ea3874a21da3bacdb7c52e9695b4b082f.cu | /** @file
* @brief Definition of CudaIntFloatFilter class.
*
* @author Jan Bobek
*/
#include "edetect.hxx"
#include "IImage.hxx"
#include "cuda/CudaError.hxx"
#include "cuda/CudaIntFloatFilter.hxx"
/**
* @brief CUDA kernel converting integer-pixels to float-pixels.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
__global__ void
convertInt2FloatKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
float* const dstp =
(float*)(ddata + row * dstride) + col;
const unsigned char* const srcp =
sdata + row * sstride + col;
*dstp = *srcp / 255.0f;
}
/**
* @brief CUDA kernel converting float-pixels to integer-pixels.
*
* @param[out] ddata
* The destination image data.
* @param[in] dstride
* Size of the row stride in destination data.
* @param[in] sdata
* The source image data.
* @param[in] sstride
* Size of the row stride in source data.
* @param[in] rows
* Number of rows in the image.
* @param[in] cols
* Number of columns in the image.
*/
__global__ void
convertFloat2IntKernel(
unsigned char* ddata,
unsigned int dstride,
const unsigned char* sdata,
unsigned int sstride,
unsigned int rows,
unsigned int cols
)
{
const unsigned int col =
blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int row =
blockIdx.y * blockDim.y + threadIdx.y;
if( !(row < rows && col < cols) )
return;
unsigned char* const dstp =
ddata + row * dstride + col;
const float* const srcp =
(const float*)(sdata + row * sstride) + col;
*dstp = (unsigned char)(__saturatef(*srcp) * 255.0f);
}
/*************************************************************************/
/* CudaIntFloatFilter */
/*************************************************************************/
void
CudaIntFloatFilter::convertInt2Float(
IImage& dest,
const IImage& src
)
{
const unsigned int columns =
src.columns() * Image::channels( src.format() );
// 32 = warp size, 8 * 32 = 256 threads
const dim3 threadsPerBlock(32, 8);
const dim3 numBlocks(
(columns + threadsPerBlock.x - 1) / threadsPerBlock.x,
(src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y );
convertInt2FloatKernel<<< numBlocks, threadsPerBlock >>>(
dest.data(), dest.stride(),
src.data(), src.stride(),
src.rows(), columns
);
cudaCheckLastError( "CudaIntFloatFilter: Int2Float kernel launch failed" );
cudaMsgCheckError( cudaDeviceSynchronize(), "CudaIntFloatFilter: Int2Float kernel run failed" );
}
void
CudaIntFloatFilter::convertFloat2Int(
IImage& dest,
const IImage& src
)
{
const unsigned int columns =
src.columns() * Image::channels( src.format() );
// 32 = warp size, 8 * 32 = 256 threads
const dim3 threadsPerBlock(32, 8);
const dim3 numBlocks(
(columns + threadsPerBlock.x - 1) / threadsPerBlock.x,
(src.rows() + threadsPerBlock.y - 1) / threadsPerBlock.y );
convertFloat2IntKernel<<< numBlocks, threadsPerBlock >>>(
dest.data(), dest.stride(),
src.data(), src.stride(),
src.rows(), columns
);
cudaCheckLastError( "CudaIntFloatFilter: Float2Int kernel launch failed" );
cudaMsgCheckError( cudaDeviceSynchronize(), "CudaIntFloatFilter: Float2Int kernel run failed" );
}
|
fdb70d154835c2852287dda184e41a87a07837d6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/fn/backend/gpu.hpp>
#include <gtest/gtest.h>
#include <gridtools/fn/column_stage.hpp>
#include <gridtools/sid/composite.hpp>
#include <gridtools/sid/synthetic.hpp>
#include <cuda_test_helper.hpp>
namespace gridtools::fn::backend {
namespace {
using namespace literals;
using sid::property;
template <int I>
using int_t = integral_constant<int, I>;
struct sum_scan : fwd {
static GT_FUNCTION constexpr auto body() {
return scan_pass(
[](auto acc, auto const &iter) { return tuple(get<0>(acc) + *iter, get<1>(acc) * *iter); },
[](auto acc) { return get<0>(acc); });
}
};
struct make_iterator_mock {
GT_FUNCTION auto operator()() const {
return
[](auto tag, auto const &ptr, auto const &strides) { return device::at_key<decltype(tag)>(ptr); };
}
};
TEST(backend_gpu, apply_column_stage) {
auto in = cuda_util::cuda_malloc<int>(5 * 7 * 3);
auto out = cuda_util::cuda_malloc<int>(5 * 7 * 3);
int inh[5][7][3], outh[5][7][3] = {};
for (int i = 0; i < 5; ++i)
for (int j = 0; j < 7; ++j)
for (int k = 0; k < 3; ++k)
inh[i][j][k] = 21 * i + 3 * j + k;
hipMemcpy(in.get(), inh, 5 * 7 * 3 * sizeof(int), hipMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(21_c, 3_c, 1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>>::values<int_t<5>, int_t<7>, int_t<3>>();
column_stage<int_t<1>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<1>(), tuple(42, 1));
hipMemcpy(outh, out.get(), 5 * 7 * 3 * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < 5; ++i)
for (int k = 0; k < 3; ++k) {
int res = 42;
for (int j = 0; j < 7; ++j) {
res += inh[i][j][k];
EXPECT_EQ(outh[i][j][k], res);
}
}
}
TEST(backend_gpu, apply_column_stage_1d) {
auto in = cuda_util::cuda_malloc<int>(5);
auto out = cuda_util::cuda_malloc<int>(5);
int inh[5], outh[5] = {};
for (int i = 0; i < 5; ++i)
inh[i] = i;
hipMemcpy(in.get(), inh, 5 * sizeof(int), hipMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>>::values<int_t<5>>();
column_stage<int_t<0>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<0>(), tuple(42, 1));
hipMemcpy(outh, out.get(), 5 * sizeof(int), hipMemcpyDeviceToHost);
int res = 42;
for (int i = 0; i < 5; ++i) {
res += inh[i];
EXPECT_EQ(outh[i], res);
}
}
TEST(backend_gpu, apply_column_stage_5d) {
auto in = cuda_util::cuda_malloc<int>(5 * 7 * 3 * 2 * 3);
auto out = cuda_util::cuda_malloc<int>(5 * 7 * 3 * 2 * 3);
int inh[5][7][3][2][3], outh[5][7][3][2][3] = {};
for (int i = 0; i < 5; ++i)
for (int j = 0; j < 7; ++j)
for (int k = 0; k < 3; ++k)
for (int l = 0; l < 2; ++l)
for (int m = 0; m < 3; ++m)
inh[i][j][k][l][m] = 126 * i + 18 * j + 6 * k + 3 * l + m;
hipMemcpy(in.get(), inh, 5 * 7 * 3 * 2 * 3 * sizeof(int), hipMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(126_c, 18_c, 6_c, 3_c, 1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>, int_t<3>, int_t<4>>::
values<int_t<5>, int_t<7>, int_t<3>, int_t<2>, int_t<3>>();
column_stage<int_t<1>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>,
meta::list<int_t<2>, int_t<2>>,
meta::list<int_t<3>, int_t<2>>,
meta::list<int_t<4>, int_t<1>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<1>(), tuple(42, 1));
hipMemcpy(outh, out.get(), 5 * 7 * 3 * 2 * 3 * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < 5; ++i)
for (int k = 0; k < 3; ++k) {
for (int l = 0; l < 2; ++l) {
for (int m = 0; m < 3; ++m) {
int res = 42;
for (int j = 0; j < 7; ++j) {
res += inh[i][j][k][l][m];
EXPECT_EQ(outh[i][j][k][l][m], res);
}
}
}
}
}
struct global_tmp_check_fun {
template <class PtrHolder, class Strides>
GT_FUNCTION bool operator()(PtrHolder ptr_holder, Strides strides) const {
auto ptr = ptr_holder();
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
for (int k = 0; k < 3; ++k) {
*ptr = 21 * i + 3 * j + k;
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), -3_c);
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), -7_c);
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), -5_c);
bool correct = true;
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
for (int k = 0; k < 3; ++k) {
correct &= *ptr == 21 * i + 3 * j + k;
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), -3_c);
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), -7_c);
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), 1_c);
}
return correct;
}
};
TEST(backend_gpu, global_tmp) {
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
auto alloc = tmp_allocator(gpu<block_sizes_t>());
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>>::values<int_t<5>, int_t<7>, int_t<3>>();
auto tmp = allocate_global_tmp(alloc, sizes, data_type<int>());
static_assert(sid::is_sid<decltype(tmp)>());
auto ptr_holder = sid::get_origin(tmp);
auto strides = sid::get_strides(tmp);
bool success = on_device::exec(global_tmp_check_fun(), ptr_holder, strides);
EXPECT_TRUE(success);
}
} // namespace
} // namespace gridtools::fn::backend
| fdb70d154835c2852287dda184e41a87a07837d6.cu | /*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gridtools/fn/backend/gpu.hpp>
#include <gtest/gtest.h>
#include <gridtools/fn/column_stage.hpp>
#include <gridtools/sid/composite.hpp>
#include <gridtools/sid/synthetic.hpp>
#include <cuda_test_helper.hpp>
namespace gridtools::fn::backend {
namespace {
using namespace literals;
using sid::property;
template <int I>
using int_t = integral_constant<int, I>;
struct sum_scan : fwd {
static GT_FUNCTION constexpr auto body() {
return scan_pass(
[](auto acc, auto const &iter) { return tuple(get<0>(acc) + *iter, get<1>(acc) * *iter); },
[](auto acc) { return get<0>(acc); });
}
};
struct make_iterator_mock {
GT_FUNCTION auto operator()() const {
return
[](auto tag, auto const &ptr, auto const &strides) { return device::at_key<decltype(tag)>(ptr); };
}
};
TEST(backend_gpu, apply_column_stage) {
auto in = cuda_util::cuda_malloc<int>(5 * 7 * 3);
auto out = cuda_util::cuda_malloc<int>(5 * 7 * 3);
int inh[5][7][3], outh[5][7][3] = {};
for (int i = 0; i < 5; ++i)
for (int j = 0; j < 7; ++j)
for (int k = 0; k < 3; ++k)
inh[i][j][k] = 21 * i + 3 * j + k;
cudaMemcpy(in.get(), inh, 5 * 7 * 3 * sizeof(int), cudaMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(21_c, 3_c, 1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>>::values<int_t<5>, int_t<7>, int_t<3>>();
column_stage<int_t<1>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<1>(), tuple(42, 1));
cudaMemcpy(outh, out.get(), 5 * 7 * 3 * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < 5; ++i)
for (int k = 0; k < 3; ++k) {
int res = 42;
for (int j = 0; j < 7; ++j) {
res += inh[i][j][k];
EXPECT_EQ(outh[i][j][k], res);
}
}
}
TEST(backend_gpu, apply_column_stage_1d) {
auto in = cuda_util::cuda_malloc<int>(5);
auto out = cuda_util::cuda_malloc<int>(5);
int inh[5], outh[5] = {};
for (int i = 0; i < 5; ++i)
inh[i] = i;
cudaMemcpy(in.get(), inh, 5 * sizeof(int), cudaMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>>::values<int_t<5>>();
column_stage<int_t<0>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<0>(), tuple(42, 1));
cudaMemcpy(outh, out.get(), 5 * sizeof(int), cudaMemcpyDeviceToHost);
int res = 42;
for (int i = 0; i < 5; ++i) {
res += inh[i];
EXPECT_EQ(outh[i], res);
}
}
TEST(backend_gpu, apply_column_stage_5d) {
auto in = cuda_util::cuda_malloc<int>(5 * 7 * 3 * 2 * 3);
auto out = cuda_util::cuda_malloc<int>(5 * 7 * 3 * 2 * 3);
int inh[5][7][3][2][3], outh[5][7][3][2][3] = {};
for (int i = 0; i < 5; ++i)
for (int j = 0; j < 7; ++j)
for (int k = 0; k < 3; ++k)
for (int l = 0; l < 2; ++l)
for (int m = 0; m < 3; ++m)
inh[i][j][k][l][m] = 126 * i + 18 * j + 6 * k + 3 * l + m;
cudaMemcpy(in.get(), inh, 5 * 7 * 3 * 2 * 3 * sizeof(int), cudaMemcpyHostToDevice);
auto as_synthetic = [](int *x) {
return sid::synthetic()
.set<property::origin>(sid::host_device::simple_ptr_holder(x))
.set<property::strides>(tuple(126_c, 18_c, 6_c, 3_c, 1_c));
};
auto composite =
sid::composite::keys<int_t<0>, int_t<1>>::make_values(as_synthetic(out.get()), as_synthetic(in.get()));
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>, int_t<3>, int_t<4>>::
values<int_t<5>, int_t<7>, int_t<3>, int_t<2>, int_t<3>>();
column_stage<int_t<1>, sum_scan, 0, 1> cs;
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>,
meta::list<int_t<2>, int_t<2>>,
meta::list<int_t<3>, int_t<2>>,
meta::list<int_t<4>, int_t<1>>>;
apply_column_stage(
gpu<block_sizes_t>(), sizes, cs, make_iterator_mock(), composite, int_t<1>(), tuple(42, 1));
cudaMemcpy(outh, out.get(), 5 * 7 * 3 * 2 * 3 * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < 5; ++i)
for (int k = 0; k < 3; ++k) {
for (int l = 0; l < 2; ++l) {
for (int m = 0; m < 3; ++m) {
int res = 42;
for (int j = 0; j < 7; ++j) {
res += inh[i][j][k][l][m];
EXPECT_EQ(outh[i][j][k][l][m], res);
}
}
}
}
}
struct global_tmp_check_fun {
template <class PtrHolder, class Strides>
GT_FUNCTION bool operator()(PtrHolder ptr_holder, Strides strides) const {
auto ptr = ptr_holder();
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
for (int k = 0; k < 3; ++k) {
*ptr = 21 * i + 3 * j + k;
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), -3_c);
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), -7_c);
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), -5_c);
bool correct = true;
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 7; ++j) {
for (int k = 0; k < 3; ++k) {
correct &= *ptr == 21 * i + 3 * j + k;
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<2>>(strides), -3_c);
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), 1_c);
}
sid::shift(ptr, sid::get_stride<int_t<1>>(strides), -7_c);
sid::shift(ptr, sid::get_stride<int_t<0>>(strides), 1_c);
}
return correct;
}
};
TEST(backend_gpu, global_tmp) {
using block_sizes_t = meta::list<meta::list<int_t<0>, int_t<4>>, meta::list<int_t<2>, int_t<2>>>;
auto alloc = tmp_allocator(gpu<block_sizes_t>());
auto sizes = hymap::keys<int_t<0>, int_t<1>, int_t<2>>::values<int_t<5>, int_t<7>, int_t<3>>();
auto tmp = allocate_global_tmp(alloc, sizes, data_type<int>());
static_assert(sid::is_sid<decltype(tmp)>());
auto ptr_holder = sid::get_origin(tmp);
auto strides = sid::get_strides(tmp);
bool success = on_device::exec(global_tmp_check_fun(), ptr_holder, strides);
EXPECT_TRUE(success);
}
} // namespace
} // namespace gridtools::fn::backend
|
0b3dd76a84f351a6e4d9d79560509e1a8cf1c5da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "opencv2/opencv.hpp"
#include <Windows.h>
#include <time.h>
using namespace cv;
using namespace std;
//texture that stores the input image data
texture<uchar, 2, hipReadModeElementType> src;
//bools that keep track if the user wants to save the outputs or an error occurred.
bool saveimage;
bool savevideo;
bool record;
bool failedOutput;
bool nocam;
bool fpsfail;
/*5x5 disk structuring element = {0, 1, 1, 1, 0},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
{0, 1, 1, 1, 0}*/
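/* The kernel below computes a morphological laplacian per element: it takes the maximum (dilation) and
minimum (erosion) under the structuring element and outputs (max - original) - (original - min), clamped
at 0. The image data is interleaved BGR, so the column offsets step by 3 (tidx - 6 .. tidx + 6) to stay
within one color channel while covering the 5 columns of the disk; the row offsets cover tidy - 2 .. tidy + 2. */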
__global__ void laplacian_texture(uchar *dev_lap, int rows, int cols) {
int tidx = threadIdx.x + blockIdx.x*blockDim.x;
int tidy = threadIdx.y + blockIdx.y*blockDim.y;
uchar max = 0;
uchar min = 255;
if (tidx >= cols || tidy >= rows) {
return;
}
//loop through the neighborhood covered by the 5x5 disk structuring element (the four corners are skipped) and keep track of the maximum and minimum values
for (int i = tidy - 2; i <= tidy + 2; i++) {
for (int j = tidx - 6; j <= tidx + 6; j += 3) {
if (i < 0 || i >= rows || j < 0 || j >= cols || ((i == tidy - 2) && (j == tidx - 6)) || ((i == tidy - 2) && (j == tidx + 6)) || ((i == tidy + 2) && (j == tidx - 6)) || ((i == tidy + 2) && (j == tidx + 6))) {
continue;
}
uchar current = tex2D(src, j, i);
if (current > max) {
max = current;
}
if (current < min) {
min = current;
}
}
}
//perform the laplacian at the current pixel
uchar original = tex2D(src, tidx, tidy);
if ((max - original) < (original - min)) {
dev_lap[tidy * cols + tidx] = 0;
}
else {
dev_lap[tidy * cols + tidx] = (max - original) - (original - min);
}
}
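/* Global-memory variant of the same morphological laplacian. It works on the flat interleaved BGR array
with a grid-stride loop, computing row bounds manually instead of relying on texture clamping. */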
__global__ void laplacian_simple(uchar *dev_data, uchar *dev_lap, int total_pixels, int cols) {
//threadID gives every thread running on the GPU a unique index. Each thread works on one element of the interleaved data (one color component of one pixel).
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
uchar max = 0;
uchar min = 255;
//Since the array is 1-dimensional, right_edge and left_edge make sure a value from a separate row is not accessed.
int right_edge = 0;
int left_edge = 0;
/*If the image has more elements than total threads running on the GPU, each thread strides forward by the
total thread count (a grid-stride loop) and also processes the elements that would otherwise be missed*/
for (threadID; threadID < total_pixels; threadID += blockDim.x * gridDim.x) {
for (int row = threadID - (2 * cols); row <= threadID + (2 * cols); row += cols) {
right_edge = cols * ((row / cols) + 1);
left_edge = cols * (row / cols);
for (int pos = row - 6; pos <= row + 6; pos+=3) {
if (row < 0 || row >= total_pixels || pos < left_edge || pos >= right_edge || ((row == threadID - (2 * cols)) && (pos == row - 6)) || ((row == threadID - (2 * cols)) && (pos == row + 6)) || ((row == threadID + (2 * cols)) && (pos == row - 6)) || ((row == threadID + (2 * cols)) && (pos == row + 6))) {
continue;
}
//Calculates the maximum and minimum within the area that the structuring element covers at the current pixel.
uchar current = dev_data[pos];
if (current > max) {
max = current;
}
if (current < min) {
min = current;
}
}
}
/*Calculates (dilation - original) - (original - erosion) at the current pixel to get the morphological laplacian.
If the first difference is smaller than the second, the pixel is set to 0 to keep the unsigned result from wrapping around*/
uchar original = dev_data[threadID];
if ((max - original) < (original - min)) {
dev_lap[threadID] = 0;
}
else {
dev_lap[threadID] = (max - original) - (original - min);
}
//Reset the maximum and minimum storage for the next pixel
max = 0;
min = 255;
}
}
//Used when the user inputs a video file but does not want to save the output
void videoNoSave() {
//code to make the open file dialog box appear
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.avi, *.divx\0*.avi;*.divx;\0\0*\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (int i = 0; i <= (int)strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
//close the handle because the open file dialog box had a handle on the file which would not allow videocapture to read it
CloseHandle(hf);
VideoCapture cap(ofn.lpstrFile);
double fps = cap.get(CV_CAP_PROP_FPS);
Mat frame;
Mat lap_frame;
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
hipArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
/*Clamp address mode means that if a coordinate outside of the texture array is accessed then, instead of
faulting, the nearest value along the edge is returned. This works well for this program because those
clamped edge values are already part of the structuring element's neighborhood*/
src.addressMode[0] = hipAddressModeClamp;
src.addressMode[1] = hipAddressModeClamp;
if (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
//Allocate device memory and compute the launch constants here so the work is not repeated inside the video loop.
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
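// 32x32 = 1024 threads per block; the grid is sized with ceil so the 3*cols x rows interleaved
// elements are fully covered even when the dimensions are not multiples of the block size.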
hipMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
hipMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
//Allow the user to close the original video, but keep playing the morphological operation.
//If the user closes the laplacian video then close the rest of the windows as well.
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
hipMemcpyToArray(dev_data, 0, 0, frame.data, size, hipMemcpyHostToDevice);
hipBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
hipMemcpy(lap_frame.data, dev_lap, size, hipMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
//If we reached the end of the video then clean up.
if (frame.empty()) {
destroyAllWindows();
break;
}
}
//If the laplacian window was closed then close the original as well
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
hipUnbindTexture(src);
hipFree(dev_data);
hipFree(dev_lap);
cap.release();
}
//Same flow as videoNoSave, except the processed frames are also written to an output video file.
void videoSave() {
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.avi, *.divx\0*.avi;*.divx;\0\0*\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (int i = 0; i <= (int)strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
VideoCapture cap(ofn.lpstrFile);
Mat frame;
Mat lap_frame;
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.avi\0*.avi;\0\0*\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "avi";
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (int i = 0; i <= (int)strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
remove(sfn.lpstrFile);
double fps = cap.get(CV_CAP_PROP_FPS);
VideoWriter output_cap(sfn.lpstrFile, -1, fps, Size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
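// A fourcc of -1 typically makes OpenCV on Windows pop up a codec-selection dialog; if the user
// cancels or no suitable codec is installed, isOpened() stays false and failedOutput is reported.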
if (!output_cap.isOpened())
{
failedOutput = true;
return;
}
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
hipArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = hipAddressModeClamp;
src.addressMode[1] = hipAddressModeClamp;
if (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
hipMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
hipMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
hipMemcpyToArray(dev_data, 0, 0, frame.data, size, hipMemcpyHostToDevice);
hipBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
hipMemcpy(lap_frame.data, dev_lap, size, hipMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
output_cap.write(lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
if (frame.empty()) {
destroyAllWindows();
break;
}
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
hipUnbindTexture(src);
hipFree(dev_data);
hipFree(dev_lap);
}
cap.release();
}
void camera_feed_nosave() {
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
//Let the user set camera resolution
//cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
//cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
//Find the camera fps here
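// Grab 120 frames and time them with time(), which has one-second resolution, so the measurement
// needs a few seconds of capture; fps is then num_frames divided by the elapsed seconds.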
int num_frames = 120;
time_t start, end;
Mat frame;
Mat lap_frame;
cap >> frame;
if (frame.empty()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
time(&start);
for (int i = 0; i < num_frames; i++) {
cap >> frame;
}
time(&end);
double seconds = difftime(end, start);
if (seconds == 0) {
cout << "Error with camera. Failed to calculate fps" << endl;
return;
}
double fps = num_frames / seconds;
cout << fps << endl;
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
hipArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = hipAddressModeClamp;
src.addressMode[1] = hipAddressModeClamp;
if (IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
hipMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
hipMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
hipMemcpyToArray(dev_data, 0, 0, frame.data, size, hipMemcpyHostToDevice);
hipBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
hipMemcpy(lap_frame.data, dev_lap, size, hipMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
hipUnbindTexture(src);
hipFree(dev_data);
hipFree(dev_lap);
cap.release();
}
void camera_feed_save() {
VideoCapture cap(0);// open the default camera
if (!cap.isOpened()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
//Let the user set camera resolution
//cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
//cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.avi\0*.avi;\0\0*\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "avi";
//Find the camera fps here
int num_frames = 120;
time_t start, end;
Mat frame;
Mat lap_frame;
cap >> frame;
if (frame.empty()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
time(&start);
for (int i = 0; i < num_frames; i++) {
cap >> frame;
}
time(&end);
double seconds = difftime(end, start);
if (seconds == 0) {
fpsfail = true;
cout << "Error with camera. Failed to calculate fps" << endl;
return;
}
double fps = num_frames / seconds;
cout << fps << endl;
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (int i = 0; i <= (int)strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
remove(sfn.lpstrFile);
//cap.get(CV_CAP_PROP_FPS) is used for input videos not webcam.
VideoWriter output_cap(sfn.lpstrFile, -1, fps, Size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
failedOutput = true;
return;
}
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
hipArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = hipAddressModeClamp;
src.addressMode[1] = hipAddressModeClamp;
if (IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
hipMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
hipMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
hipMemcpyToArray(dev_data, 0, 0, frame.data, size, hipMemcpyHostToDevice);
hipBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
hipMemcpy(lap_frame.data, dev_lap, size, hipMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
output_cap.write(lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
hipUnbindTexture(src);
hipFree(dev_data);
hipFree(dev_lap);
}
cap.release();
}
void image_texture() {
//Read the filename that the user wishes to enter and keep asking for user input until a file can be opened or the user quits
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (int i = 0; i <= (int)strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
Mat image = imread(ofn.lpstrFile, 1);
namedWindow("INPUT", CV_WINDOW_KEEPRATIO);
imshow("INPUT", image);
uchar *dev_lap;
hipMalloc((void**)&dev_lap, 3 * image.rows * image.cols * sizeof(uchar));
hipArray *dev_data;
hipMallocArray(&dev_data, &src.channelDesc, 3 * image.cols, image.rows);
hipMemcpyToArray(dev_data, 0, 0, image.data, 3 * image.cols * image.rows * sizeof(uchar), hipMemcpyHostToDevice);
hipBindTextureToArray(src, dev_data, src.channelDesc);
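// The image bytes were copied into a CUDA array matching the texture's channel descriptor and the
// texture was bound to it, so the kernel below reads the data through tex2D with clamp addressing.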
dim3 gridsize, blocksize;
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * image.cols) / blocksize.x);
gridsize.y = ceil(float(image.rows) / blocksize.y);
hipLaunchKernelGGL(( laplacian_texture) , dim3(gridsize), dim3(blocksize), 0, 0, dev_lap, image.rows, 3 * image.cols);
hipMemcpy(image.data, dev_lap, 3 * image.rows * image.cols * sizeof(uchar), hipMemcpyDeviceToHost);
hipUnbindTexture(src);
hipFree(dev_data);
hipFree(dev_lap);
namedWindow("OUTPUT", CV_WINDOW_KEEPRATIO);
imshow("OUTPUT", image);
if (saveimage) {
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "jpg";
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (int i = 0; i <= (int)strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
imwrite(sfn.lpstrFile, image);
}
}
waitKey(0);
return;
}
void image_simple() {
//Read the filename that the user wishes to enter and keep asking for user input until a file can be opened or the user quits
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "Supported Image Files\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0ALL FILES\0*\0\0\0\0\0\0\0\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (int i = 0; i <= (int)strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
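// Note that the start event is recorded before imread and imshow, so the elapsed time printed below
// covers image loading, display, the host/device copies and the kernel, not the kernel alone.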
Mat image = imread(ofn.lpstrFile, 1);
namedWindow("INPUT", CV_WINDOW_KEEPRATIO);
imshow("INPUT", image);
/*Copy the interleaved BGR image data to the GPU. dev_data holds the input intensities and dev_lap receives
the laplacian result; all three color channels are processed together in the flat interleaved array.*/
uchar *dev_data;
uchar *dev_lap;
hipMalloc((void**)&dev_data, image.rows * image.cols * 3 * sizeof(uchar));
hipMalloc((void**)&dev_lap, image.rows * image.cols * 3 * sizeof(uchar));
hipMemcpy(dev_data, image.data, image.rows * image.cols * 3 * sizeof(uchar), hipMemcpyHostToDevice);
/*Launch the CUDA kernel with 512 blocks of 1024 threads each; the blocks are scheduled across the GPU's
streaming multiprocessors and the grid-stride loop inside the kernel covers any remaining elements*/
laplacian_simple << <512, 1024 >> >(dev_data, dev_lap, 3 * image.rows * image.cols, 3 * image.cols);
//Transfer the laplacian result from the device back into the host image buffer
hipMemcpy(image.data, dev_lap, image.rows * image.cols * 3 * sizeof(uchar), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << endl;
hipFree(dev_data);
hipFree(dev_lap);
//The laplacian result is already in the interleaved BGR buffer, so display it directly as the output image.
namedWindow("OUTPUT", CV_WINDOW_KEEPRATIO);
imshow("OUTPUT", image);
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "jpg";
if (GetSaveFileName(&sfn) != true)
{
cout << "Saving file canceled, closing program in 10 secconds." << endl;
}
else {
for (int i = 0; i <= (int)strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
imwrite(sfn.lpstrFile, image);
}
waitKey(0);
return;
}
/* Win32 GUI: window handles for the controls, and the window procedure that handles all input to the main window */
HWND button1;
HWND check1;
HWND button2;
HWND check2;
HWND button3;
HWND check3;
char input[520];
HWND edit;
HWND text;
LRESULT CALLBACK WndProc(HWND hwnd, UINT Message, WPARAM wParam, LPARAM lParam) {
switch (Message) {
case WM_CREATE: {
text = CreateWindow(TEXT("STATIC"), TEXT("Laplacian Morphological Operation"),
WS_VISIBLE | WS_CHILD,
190, 10,
400, 25,
hwnd, (HMENU) NULL, NULL, NULL);
HFONT text_change = CreateFont(20, 0, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Times New Roman");
SendMessage(text, WM_SETFONT, WPARAM(text_change), TRUE);
//GetWindowText(edit, input, 260);
text = CreateWindow(TEXT("STATIC"), TEXT("Live input needs a few seconds to calculate the camera's FPS. Please wait after selecting."),
WS_VISIBLE | WS_CHILD,
190, 255,
400, 50,
hwnd, (HMENU)NULL, NULL, NULL);
button1 = CreateWindow(TEXT("BUTTON"), TEXT("Image Input"),
WS_VISIBLE | WS_CHILD,
10, 50,
150, 50,
hwnd, (HMENU) 1, NULL, NULL);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button1, WM_SETFONT, WPARAM(text_change), TRUE);
check1 = CreateWindow(TEXT("button"), TEXT("Save Image"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 100,
100, 20,
hwnd, (HMENU)2, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 2, BST_CHECKED);
button2 = CreateWindow(TEXT("BUTTON"), TEXT("Video Input"),
WS_VISIBLE | WS_CHILD,
10, 150,
150, 50,
hwnd, (HMENU)3, NULL, NULL);
check2 = CreateWindow(TEXT("button"), TEXT("Save Video"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 200,
95, 20,
hwnd, (HMENU)4, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 4, BST_UNCHECKED);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button2, WM_SETFONT, WPARAM(text_change), TRUE);
button3 = CreateWindow(TEXT("BUTTON"), TEXT("Live Input"),
WS_VISIBLE | WS_CHILD,
10, 250,
150, 50,
hwnd, (HMENU)5, NULL, NULL);
check3 = CreateWindow(TEXT("button"), TEXT("Record Video"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 300,
105, 20,
hwnd, (HMENU)6, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 6, BST_UNCHECKED);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button3, WM_SETFONT, WPARAM(text_change), TRUE);
break;
}
case WM_COMMAND: {
if (LOWORD(wParam) == 1) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 2)) {
saveimage = true;
}
else {
saveimage = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
image_texture();
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 2) {
BOOL checked = IsDlgButtonChecked(hwnd, 2);
if (checked) {
CheckDlgButton(hwnd, 2, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 2, BST_CHECKED);
}
}
if (LOWORD(wParam) == 3) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 4)) {
savevideo = true;
}
else {
savevideo = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
if (savevideo) {
videoSave();
}
else {
videoNoSave();
}
if (failedOutput) {
MessageBox(hwnd, "Output video could not be opened use different compression option", "Error", MB_ICONINFORMATION);
}
failedOutput = false;
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 4) {
BOOL checked = IsDlgButtonChecked(hwnd, 4);
if (checked) {
CheckDlgButton(hwnd, 4, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 4, BST_CHECKED);
}
}
if (LOWORD(wParam) == 5) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 6)) {
record = true;
}
else {
record = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
if (record) {
camera_feed_save();
}
else {
camera_feed_nosave();
}
if (failedOutput) {
MessageBox(hwnd, "Output video could not be opened use different compression option", "Error", MB_ICONINFORMATION);
}
if (nocam) {
MessageBox(hwnd, "Failed to find default camera", "Error", MB_ICONINFORMATION);
}
if (fpsfail) {
MessageBox(hwnd, "Error with camera. Failed to calculate fps", "Error", MB_ICONINFORMATION);
}
failedOutput = false;
nocam = false;
fpsfail = false;
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 6) {
BOOL checked = IsDlgButtonChecked(hwnd, 6);
if (checked) {
CheckDlgButton(hwnd, 6, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 6, BST_CHECKED);
}
}
break;
}
/* Upon destruction, tell the main thread to stop */
case WM_DESTROY: {
PostQuitMessage(0);
break;
}
/* All other messages (a lot of them) are processed using default procedures */
default:
return DefWindowProc(hwnd, Message, wParam, lParam);
}
return 0;
}
/* The 'main' function of Win32 GUI programs: this is where execution starts */
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) {
WNDCLASSEX wc; /* A properties struct of our window */
HWND hwnd; /* A 'HANDLE', hence the H, or a pointer to our window */
MSG msg; /* A temporary location for all messages */
/* zero out the struct and set the stuff we want to modify */
memset(&wc, 0, sizeof(wc));
wc.cbSize = sizeof(WNDCLASSEX);
wc.lpfnWndProc = WndProc; /* This is where we will send messages to */
wc.hInstance = hInstance;
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
/* White, COLOR_WINDOW is just a #define for a system color, try Ctrl+Clicking it */
//wc.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
wc.hbrBackground = GetSysColorBrush(COLOR_3DFACE);
wc.lpszClassName = "WindowClass";
wc.hIcon = (HICON)LoadImage( // returns a HANDLE so we have to cast to HICON
NULL, // hInstance must be NULL when loading from a file
"lapIcon.ico", // the icon file name
IMAGE_ICON, // specifies that the file is an icon
0, // width of the image (we'll specify default later on)
0, // height of the image
LR_LOADFROMFILE | // we want to load a file (as opposed to a resource)
LR_DEFAULTSIZE | // default metrics based on the type (IMAGE_ICON, 32x32)
LR_SHARED // let the system release the handle when it's no longer used
);
wc.hIconSm = LoadIcon(NULL, NULL); /* use the name "A" to use the project icon */
if (!RegisterClassEx(&wc)) {
MessageBox(NULL, "Window Registration Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK);
return 0;
}
hwnd = CreateWindowEx(WS_EX_CLIENTEDGE, "WindowClass", "CUDA Laplacian", WS_VISIBLE | WS_SYSMENU,
CW_USEDEFAULT, /* x */
CW_USEDEFAULT, /* y */
640, /* width */
480, /* height */
NULL, NULL, hInstance, NULL);
if (hwnd == NULL) {
MessageBox(NULL, "Window Creation Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK);
return 0;
}
/*
This is the heart of our program where all input is processed and
sent to WndProc. Note that GetMessage blocks code flow until it receives something, so
this loop will not produce unreasonably high CPU usage
*/
while (GetMessage(&msg, NULL, 0, 0) > 0) { /* If no error is received... */
TranslateMessage(&msg); /* Translate key codes to chars if present */
DispatchMessage(&msg); /* Send it to WndProc */
}
return msg.wParam;
} | 0b3dd76a84f351a6e4d9d79560509e1a8cf1c5da.cu | #include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include "opencv2/opencv.hpp"
#include <Windows.h>
#include <time.h>
using namespace cv;
using namespace std;
//texture that stores the input image data
texture<uchar, 2, cudaReadModeElementType> src;
//bools that keep track if the user wants to save the outputs or an error occurred.
bool saveimage;
bool savevideo;
bool record;
bool failedOutput;
bool nocam;
bool fpsfail;
/*5x5 disk structuring element = {0, 1, 1, 1, 0},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
{0, 1, 1, 1, 0}*/
__global__ void laplacian_texture(uchar *dev_lap, int rows, int cols) {
int tidx = threadIdx.x + blockIdx.x*blockDim.x;
int tidy = threadIdx.y + blockIdx.y*blockDim.y;
uchar max = 0;
uchar min = 255;
if (tidx >= cols || tidy >= rows) {
return;
}
//loop through the 25 elements that the structuring element covers and keep track of the maximum and minimum value;
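//The data is interleaved BGR, so values belonging to the same color channel are 3 apart in x: stepping j by 3
//across tidx-6..tidx+6 visits the five columns of the same channel centered on this pixel, while i covers the
//five rows; the four corner offsets are skipped to reproduce the disk shape shown above.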
for (int i = tidy - 2; i <= tidy + 2; i++) {
for (int j = tidx - 6; j <= tidx + 6; j += 3) {
if (i < 0 || i >= rows || j < 0 || j >= cols || ((i == tidy - 2) && (j == tidx - 6)) || ((i == tidy - 2) && (j == tidx + 6)) || ((i == tidy + 2) && (j == tidx - 6)) || ((i == tidy + 2) && (j == tidx + 6))) {
continue;
}
uchar current = tex2D(src, j, i);
if (current > max) {
max = current;
}
if (current < min) {
min = current;
}
}
}
//perform the laplacian at the current pixel
uchar original = tex2D(src, tidx, tidy);
if ((max - original) < (original - min)) {
dev_lap[tidy * cols + tidx] = 0;
}
else {
dev_lap[tidy * cols + tidx] = (max - original) - (original - min);
}
}
__global__ void laplacian_simple(uchar *dev_data, uchar *dev_lap, int total_pixels, int cols) {
//threadID provides every thread that runs on the GPU an individual value. Every thread works on a pixel in each color channel.
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
uchar max = 0;
uchar min = 255;
//Since the arrays are 1 dimensional the right_edge and left_edge make sure a pixel from a separate row is not accessed.
int right_edge = 0;
int left_edge = 0;
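//Worked example: for a 640-pixel-wide BGR image cols is 1920, so a thread with flat index 1923 lies in row 1;
//left_edge becomes 1920 and right_edge 3840 for that row, and any neighbor position outside that span is skipped
//instead of wrapping into an adjacent row.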
/*If the image has more channel values than the total number of launched threads, each thread strides forward by the
grid size and processes the elements that would otherwise be missed (a grid-stride loop)*/
for (threadID; threadID < total_pixels; threadID += blockDim.x * gridDim.x) {
for (int row = threadID - (2 * cols); row <= threadID + (2 * cols); row += cols) {
right_edge = cols * ((row / cols) + 1);
left_edge = cols * (row / cols);
for (int pos = row - 6; pos <= row + 6; pos+=3) {
if (row < 0 || row >= total_pixels || pos < left_edge || pos >= right_edge || ((row == threadID - (2 * cols)) && (pos == row - 6)) || ((row == threadID - (2 * cols)) && (pos == row + 6)) || ((row == threadID + (2 * cols)) && (pos == row - 6)) || ((row == threadID + (2 * cols)) && (pos == row + 6))) {
continue;
}
//Calculates the maximum and minimum within the area that the structuring element covers at the current pixel.
uchar current = dev_data[pos];
if (current > max) {
max = current;
}
if (current < min) {
min = current;
}
}
}
/*Calculates the dilation - the erosion of the current pixel to get the laplacian.
If the dilation is less than the erosion then the pixel is set to 0 to prevent an overflow*/
uchar original = dev_data[threadID];
if ((max - original) < (original - min)) {
dev_lap[threadID] = 0;
}
else {
dev_lap[threadID] = (max - original) - (original - min);
}
//Reset the maximum and minimum storage for the next pixel
max = 0;
min = 255;
}
}
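/*The stored value is the morphological Laplacian (dilation - original) - (original - erosion), clamped at zero to
stay within uchar range: e.g. a pixel of 100 whose neighborhood spans 90..140 becomes (140-100)-(100-90) = 30,
while a pixel that is itself the local maximum yields 0.*/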
//Used when the user inputs a video file but does not want to save the output
void videoNoSave() {
//code to make the open file dialog box appear
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.avi, *.divx\0*.avi;*.divx;\0\0*\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (size_t i = 0; i < strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
//close the handle because the open file dialog box had a handle on the file which would not allow videocapture to read it
CloseHandle(hf);
VideoCapture cap(ofn.lpstrFile);
double fps = cap.get(CV_CAP_PROP_FPS);
Mat frame;
Mat lap_frame;
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
cudaArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
/*Clamp address mode means that an access outside the texture array does not fault; instead the nearest value
along the edge is returned. This suits this program because those edge values are already part of the
structuring element's neighborhood at the image border*/
src.addressMode[0] = cudaAddressModeClamp;
src.addressMode[1] = cudaAddressModeClamp;
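//With clamped, unnormalized coordinates a fetch such as tex2D(src, -2, 0) simply returns the texel at (0, 0)
//rather than reading out of bounds, so border pixels effectively reuse the nearest edge value.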
if (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
//malloc and calculate constants here to refrain from taking up time during the video loop.
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
cudaMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
cudaMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
//Allow the user to close the original video, but keep playing the morphological operation.
//If the user closes the laplacian video then close the rest of the windows as well.
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
cudaMemcpyToArray(dev_data, 0, 0, frame.data, size, cudaMemcpyHostToDevice);
cudaBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
cudaMemcpy(lap_frame.data, dev_lap, size, cudaMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
//If we reached the end of the video then clean up.
if (frame.empty()) {
destroyAllWindows();
break;
}
}
//If the laplacian window was closed then close the original as well
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
cudaUnbindTexture(src);
cudaFree(dev_data);
cudaFree(dev_lap);
cap.release();
}
//Very similar to video without save except for the fact that this one has saving involved
void videoSave() {
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.avi, *.divx\0*.avi;*.divx;\0\0*\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (size_t i = 0; i < strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
VideoCapture cap(ofn.lpstrFile);
Mat frame;
Mat lap_frame;
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.avi\0*.avi;\0\0*\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "avi";
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (size_t i = 0; i < strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
remove(sfn.lpstrFile);
double fps = cap.get(CV_CAP_PROP_FPS);
VideoWriter output_cap(sfn.lpstrFile, -1, fps, Size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
failedOutput = true;
return;
}
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
cudaArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = cudaAddressModeClamp;
src.addressMode[1] = cudaAddressModeClamp;
if (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
cudaMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
cudaMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (cap.isOpened() && IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
cudaMemcpyToArray(dev_data, 0, 0, frame.data, size, cudaMemcpyHostToDevice);
cudaBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
cudaMemcpy(lap_frame.data, dev_lap, size, cudaMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
output_cap.write(lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
if (frame.empty()) {
destroyAllWindows();
break;
}
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
cudaUnbindTexture(src);
cudaFree(dev_data);
cudaFree(dev_lap);
}
cap.release();
}
void camera_feed_nosave() {
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
//Let the user set camera resolution
//cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
//cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
//Find the camera fps here
int num_frames = 120;
time_t start, end;
Mat frame;
Mat lap_frame;
cap >> frame;
if (frame.empty()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
time(&start);
for (int i = 0; i < num_frames; i++) {
cap >> frame;
}
time(&end);
double seconds = difftime(end, start);
if (seconds == 0) {
cout << "Error with camera. Failed to calculate fps" << endl;
return;
}
double fps = num_frames / seconds;
cout << fps << endl;
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
cudaArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = cudaAddressModeClamp;
src.addressMode[1] = cudaAddressModeClamp;
if (IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
cudaMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
cudaMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
cudaMemcpyToArray(dev_data, 0, 0, frame.data, size, cudaMemcpyHostToDevice);
cudaBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
cudaMemcpy(lap_frame.data, dev_lap, size, cudaMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
cudaUnbindTexture(src);
cudaFree(dev_data);
cudaFree(dev_lap);
cap.release();
}
void camera_feed_save() {
VideoCapture cap(0);// open the default camera
if (!cap.isOpened()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
//Let the user set camera resolution
//cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
//cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.avi\0*.avi;\0\0*\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "avi";
//Find the camera fps here
int num_frames = 120;
time_t start, end;
Mat frame;
Mat lap_frame;
cap >> frame;
if (frame.empty()) {
nocam = true;
cout << "Failed to find default camera" << endl;
return;
}
time(&start);
for (int i = 0; i < num_frames; i++) {
cap >> frame;
}
time(&end);
double seconds = difftime(end, start);
if (seconds == 0) {
fpsfail = true;
cout << "Error with camera. Failed to calculate fps" << endl;
return;
}
double fps = num_frames / seconds;
cout << fps << endl;
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (size_t i = 0; i < strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
remove(sfn.lpstrFile);
//cap.get(CV_CAP_PROP_FPS) is used for input videos not webcam.
VideoWriter output_cap(sfn.lpstrFile, -1, fps, Size(cap.get(CV_CAP_PROP_FRAME_WIDTH), cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
failedOutput = true;
return;
}
namedWindow("Laplacian", 1);
namedWindow("Original", 1);
HWND LAPhwnd = (HWND)cvGetWindowHandle("Laplacian");
HWND ORIhwnd = (HWND)cvGetWindowHandle("Original");
cudaArray *dev_data;
uchar *dev_lap;
dim3 gridsize, blocksize;
src.addressMode[0] = cudaAddressModeClamp;
src.addressMode[1] = cudaAddressModeClamp;
if (IsWindowVisible(LAPhwnd)) {
cap >> frame;
lap_frame = frame.clone();
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * frame.cols) / blocksize.x);
gridsize.y = ceil(float(frame.rows) / blocksize.y);
cudaMallocArray(&dev_data, &src.channelDesc, 3 * frame.cols, frame.rows);
cudaMalloc((void**)&dev_lap, 3 * frame.rows * frame.cols * sizeof(uchar));
}
int size = 3 * frame.cols * frame.rows * sizeof(uchar);
while (IsWindowVisible(LAPhwnd)) {
if (IsWindowVisible(ORIhwnd)) {
imshow("Original", frame);
}
cudaMemcpyToArray(dev_data, 0, 0, frame.data, size, cudaMemcpyHostToDevice);
cudaBindTextureToArray(src, dev_data, src.channelDesc);
laplacian_texture << <gridsize, blocksize >> >(dev_lap, frame.rows, 3 * frame.cols);
cudaMemcpy(lap_frame.data, dev_lap, size, cudaMemcpyDeviceToHost);
imshow("Laplacian", lap_frame);
output_cap.write(lap_frame);
waitKey(1000 / fps);
cap >> frame; // get a new frame from camera
}
if (IsWindowVisible(ORIhwnd)) {
destroyAllWindows();
}
cudaUnbindTexture(src);
cudaFree(dev_data);
cudaFree(dev_lap);
}
cap.release();
}
void image_texture() {
//Read the filename that the user wishes to enter and keep asking for user input until a file can be opened or the user quits
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = ".";
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (size_t i = 0; i < strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
Mat image = imread(ofn.lpstrFile, 1);
namedWindow("INPUT", CV_WINDOW_KEEPRATIO);
imshow("INPUT", image);
uchar *dev_lap;
cudaMalloc((void**)&dev_lap, 3 * image.rows * image.cols * sizeof(uchar));
cudaArray *dev_data;
cudaMallocArray(&dev_data, &src.channelDesc, 3 * image.cols, image.rows);
cudaMemcpyToArray(dev_data, 0, 0, image.data, 3 * image.cols * image.rows * sizeof(uchar), cudaMemcpyHostToDevice);
cudaBindTextureToArray(src, dev_data, src.channelDesc);
dim3 gridsize, blocksize;
blocksize.x = 32;
blocksize.y = 32;
gridsize.x = ceil(float(3 * image.cols) / blocksize.x);
gridsize.y = ceil(float(image.rows) / blocksize.y);
laplacian_texture <<<gridsize, blocksize>>>(dev_lap, image.rows, 3 * image.cols);
cudaMemcpy(image.data, dev_lap, 3 * image.rows * image.cols * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaUnbindTexture(src);
cudaFree(dev_data);
cudaFree(dev_lap);
namedWindow("OUTPUT", CV_WINDOW_KEEPRATIO);
imshow("OUTPUT", image);
if (saveimage) {
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "jpg";
if (GetSaveFileName(&sfn) != true)
{
//do nothing
}
else {
for (size_t i = 0; i < strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
imwrite(sfn.lpstrFile, image);
}
}
waitKey(0);
return;
}
void image_simple() {
//Read the filename that the user wishes to enter and keep asking for user input until a file can be opened or the user quits
OPENFILENAME ofn; // common dialog box structure
char szFile[520]; // buffer for file name
HWND hwnd = NULL; // owner window
HANDLE hf; // file handle
// Initialize OPENFILENAME
ZeroMemory(&ofn, sizeof(ofn));
ofn.lStructSize = sizeof(ofn);
ofn.hwndOwner = hwnd;
ofn.lpstrFile = szFile;
// Set lpstrFile[0] to '\0' so that GetOpenFileName does not
// use the contents of szFile to initialize itself.
ofn.lpstrFile[0] = '\0';
ofn.nMaxFile = sizeof(szFile);
ofn.lpstrFilter = "Supported Image Files\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0ALL FILES\0*\0\0\0\0\0\0\0\0\0";
ofn.nFilterIndex = 1;
ofn.lpstrFileTitle = NULL;
ofn.nMaxFileTitle = 0;
ofn.lpstrInitialDir = NULL;
ofn.Flags = OFN_PATHMUSTEXIST | OFN_FILEMUSTEXIST;
// Display the Open dialog box.
if (GetOpenFileName(&ofn) == TRUE)
hf = CreateFile(ofn.lpstrFile,
GENERIC_READ,
0,
(LPSECURITY_ATTRIBUTES)NULL,
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
(HANDLE)NULL);
if (strlen(ofn.lpstrFile) == 0) {
return;
}
for (size_t i = 0; i < strlen(ofn.lpstrFile); i++) {
if (ofn.lpstrFile[i] == '\\') {
ofn.lpstrFile[i] = '/';
}
}
CloseHandle(hf);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
Mat image = imread(ofn.lpstrFile, 1);
namedWindow("INPUT", CV_WINDOW_KEEPRATIO);
imshow("INPUT", image);
/*Copy the image's interleaved BGR pixel data to the GPU in a single buffer (dev_data). The kernel writes the
Laplacian of every channel value into dev_lap, which is then copied back over the original image data for display.*/
uchar *dev_data;
uchar *dev_lap;
cudaMalloc((void**)&dev_data, image.rows * image.cols * 3 * sizeof(uchar));
cudaMalloc((void**)&dev_lap, image.rows * image.cols * 3 * sizeof(uchar));
cudaMemcpy(dev_data, image.data, image.rows * image.cols * 3 * sizeof(uchar), cudaMemcpyHostToDevice);
/*Launch the CUDA kernel with a grid of 512 blocks of 1024 threads each; blocks are distributed across the GPU's
streaming multiprocessors, and the kernel's grid-stride loop covers any pixels beyond 512*1024 threads*/
laplacian_simple << <512, 1024 >> >(dev_data, dev_lap, 3 * image.rows * image.cols, 3 * image.cols);
//Transfer the dev_lap result from the device back into the host image buffer
cudaMemcpy(image.data, dev_lap, image.rows * image.cols * 3 * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << endl;
cudaFree(dev_data);
cudaFree(dev_lap);
//The processed data now sits in image.data, so simply display the output image alongside the input.
namedWindow("OUTPUT", CV_WINDOW_KEEPRATIO);
imshow("OUTPUT", image);
OPENFILENAME sfn;
char syFile[520];
ZeroMemory(&sfn, sizeof(sfn));
sfn.lStructSize = sizeof(sfn);
sfn.hwndOwner = NULL;
sfn.lpstrFile = syFile;
sfn.lpstrFile[0] = '\0';
sfn.nMaxFile = sizeof(syFile);
sfn.lpstrFilter = "*.jpg, *.png, *.bmp, *.dib, *.jpeg, *.jpe, *.jfif, *.tif, *.tiff\0*.jpg;*.png;*.bmp;*.dib;*.jpeg;*.jpe;*.jfif;*.tif;*.tiff\0\0*\0\0\0\0\0\0\0\0\0";
sfn.nFilterIndex = 1;
sfn.lpstrFileTitle = NULL;
sfn.nMaxFileTitle = 0;
sfn.lpstrInitialDir = ".";
sfn.Flags = OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_EXPLORER | OFN_ENABLEHOOK;
sfn.lpstrDefExt = "jpg";
if (GetSaveFileName(&sfn) != true)
{
cout << "Saving file canceled, closing program in 10 seconds." << endl;
}
else {
for (size_t i = 0; i < strlen(sfn.lpstrFile); i++) {
if (sfn.lpstrFile[i] == '\\') {
sfn.lpstrFile[i] = '/';
}
}
imwrite(sfn.lpstrFile, image);
}
waitKey(0);
return;
}
/* This is where all the input to the window goes to */
HWND button1;
HWND check1;
HWND button2;
HWND check2;
HWND button3;
HWND check3;
char input[520];
HWND edit;
HWND text;
LRESULT CALLBACK WndProc(HWND hwnd, UINT Message, WPARAM wParam, LPARAM lParam) {
switch (Message) {
case WM_CREATE: {
text = CreateWindow(TEXT("STATIC"), TEXT("Laplacian Morphological Operation"),
WS_VISIBLE | WS_CHILD,
190, 10,
400, 25,
hwnd, (HMENU) NULL, NULL, NULL);
HFONT text_change = CreateFont(20, 0, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Times New Roman");
SendMessage(text, WM_SETFONT, WPARAM(text_change), TRUE);
//GetWindowText(edit, input, 260);
text = CreateWindow(TEXT("STATIC"), TEXT("Live input needs a few seconds to calculate the camera's FPS. Please wait after selecting."),
WS_VISIBLE | WS_CHILD,
190, 255,
400, 50,
hwnd, (HMENU)NULL, NULL, NULL);
button1 = CreateWindow(TEXT("BUTTON"), TEXT("Image Input"),
WS_VISIBLE | WS_CHILD,
10, 50,
150, 50,
hwnd, (HMENU) 1, NULL, NULL);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button1, WM_SETFONT, WPARAM(text_change), TRUE);
check1 = CreateWindow(TEXT("button"), TEXT("Save Image"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 100,
100, 20,
hwnd, (HMENU)2, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 2, BST_CHECKED);
button2 = CreateWindow(TEXT("BUTTON"), TEXT("Video Input"),
WS_VISIBLE | WS_CHILD,
10, 150,
150, 50,
hwnd, (HMENU)3, NULL, NULL);
check2 = CreateWindow(TEXT("button"), TEXT("Save Video"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 200,
95, 20,
hwnd, (HMENU)4, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 4, BST_UNCHECKED);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button2, WM_SETFONT, WPARAM(text_change), TRUE);
button3 = CreateWindow(TEXT("BUTTON"), TEXT("Live Input"),
WS_VISIBLE | WS_CHILD,
10, 250,
150, 50,
hwnd, (HMENU)5, NULL, NULL);
check3 = CreateWindow(TEXT("button"), TEXT("Record Video"),
WS_VISIBLE | WS_CHILD | BS_CHECKBOX,
20, 300,
105, 20,
hwnd, (HMENU)6, ((LPCREATESTRUCT)lParam)->hInstance, NULL);
CheckDlgButton(hwnd, 6, BST_UNCHECKED);
text_change = CreateFont(30, 10, 0, 0, FW_DONTCARE, FALSE, FALSE, FALSE, ANSI_CHARSET, OUT_DEFAULT_PRECIS, CLIP_DEFAULT_PRECIS, DEFAULT_QUALITY, DEFAULT_PITCH | FF_SWISS, "Arial");
SendMessage(button3, WM_SETFONT, WPARAM(text_change), TRUE);
break;
}
case WM_COMMAND: {
if (LOWORD(wParam) == 1) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 2)) {
saveimage = true;
}
else {
saveimage = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
image_texture();
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 2) {
BOOL checked = IsDlgButtonChecked(hwnd, 2);
if (checked) {
CheckDlgButton(hwnd, 2, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 2, BST_CHECKED);
}
}
if (LOWORD(wParam) == 3) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 4)) {
savevideo = true;
}
else {
savevideo = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
if (savevideo) {
videoSave();
}
else {
videoNoSave();
}
if (failedOutput) {
MessageBox(hwnd, "Output video could not be opened use different compression option", "Error", MB_ICONINFORMATION);
}
failedOutput = false;
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 4) {
BOOL checked = IsDlgButtonChecked(hwnd, 4);
if (checked) {
CheckDlgButton(hwnd, 4, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 4, BST_CHECKED);
}
}
if (LOWORD(wParam) == 5) {
/*GetWindowText(edit, input, 260);
MessageBox(hwnd, input, "title for popup", MB_ICONINFORMATION);*/
if (IsDlgButtonChecked(hwnd, 6)) {
record = true;
}
else {
record = false;
}
EnableWindow(button1, false);
EnableWindow(check1, false);
EnableWindow(button2, false);
EnableWindow(check2, false);
EnableWindow(button3, false);
EnableWindow(check3, false);
if (record) {
camera_feed_save();
}
else {
camera_feed_nosave();
}
if (failedOutput) {
MessageBox(hwnd, "Output video could not be opened use different compression option", "Error", MB_ICONINFORMATION);
}
if (nocam) {
MessageBox(hwnd, "Failed to find default camera", "Error", MB_ICONINFORMATION);
}
if (fpsfail) {
MessageBox(hwnd, "Error with camera. Failed to calculate fps", "Error", MB_ICONINFORMATION);
}
failedOutput = false;
nocam = false;
fpsfail = false;
EnableWindow(button1, true);
EnableWindow(check1, true);
EnableWindow(button2, true);
EnableWindow(check2, true);
EnableWindow(button3, true);
EnableWindow(check3, true);
}
if (LOWORD(wParam) == 6) {
BOOL checked = IsDlgButtonChecked(hwnd, 6);
if (checked) {
CheckDlgButton(hwnd, 6, BST_UNCHECKED);
}
else {
CheckDlgButton(hwnd, 6, BST_CHECKED);
}
}
break;
}
/* Upon destruction, tell the main thread to stop */
case WM_DESTROY: {
PostQuitMessage(0);
break;
}
/* All other messages (a lot of them) are processed using default procedures */
default:
return DefWindowProc(hwnd, Message, wParam, lParam);
}
return 0;
}
/* The 'main' function of Win32 GUI programs: this is where execution starts */
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) {
WNDCLASSEX wc; /* A properties struct of our window */
HWND hwnd; /* A 'HANDLE', hence the H, or a pointer to our window */
MSG msg; /* A temporary location for all messages */
/* zero out the struct and set the stuff we want to modify */
memset(&wc, 0, sizeof(wc));
wc.cbSize = sizeof(WNDCLASSEX);
wc.lpfnWndProc = WndProc; /* This is where we will send messages to */
wc.hInstance = hInstance;
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
/* White, COLOR_WINDOW is just a #define for a system color, try Ctrl+Clicking it */
//wc.hbrBackground = (HBRUSH)(COLOR_WINDOW + 1);
wc.hbrBackground = GetSysColorBrush(COLOR_3DFACE);
wc.lpszClassName = "WindowClass";
wc.hIcon = (HICON)LoadImage( // returns a HANDLE so we have to cast to HICON
NULL, // hInstance must be NULL when loading from a file
"lapIcon.ico", // the icon file name
IMAGE_ICON, // specifies that the file is an icon
0, // width of the image (we'll specify default later on)
0, // height of the image
LR_LOADFROMFILE | // we want to load a file (as opposed to a resource)
LR_DEFAULTSIZE | // default metrics based on the type (IMAGE_ICON, 32x32)
LR_SHARED // let the system release the handle when it's no longer used
);
wc.hIconSm = LoadIcon(NULL, NULL); /* use the name "A" to use the project icon */
if (!RegisterClassEx(&wc)) {
MessageBox(NULL, "Window Registration Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK);
return 0;
}
hwnd = CreateWindowEx(WS_EX_CLIENTEDGE, "WindowClass", "CUDA Laplacian", WS_VISIBLE | WS_SYSMENU,
CW_USEDEFAULT, /* x */
CW_USEDEFAULT, /* y */
640, /* width */
480, /* height */
NULL, NULL, hInstance, NULL);
if (hwnd == NULL) {
MessageBox(NULL, "Window Creation Failed!", "Error!", MB_ICONEXCLAMATION | MB_OK);
return 0;
}
/*
This is the heart of our program where all input is processed and
sent to WndProc. Note that GetMessage blocks code flow until it receives something, so
this loop will not produce unreasonably high CPU usage
*/
while (GetMessage(&msg, NULL, 0, 0) > 0) { /* If no error is received... */
TranslateMessage(&msg); /* Translate key codes to chars if present */
DispatchMessage(&msg); /* Send it to WndProc */
}
return msg.wParam;
} |
c88fe7e4935122ac6dfbd66b4cd8266a09dbd6ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/slice_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
typedef int8_t int8;
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype *in_data,
const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype *out_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
out_data[index] = in_data[bottom_index];
}
}
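// Index bookkeeping example (assuming the usual NCHW layout and slicing along the channel axis): with
// bottom_slice_axis = 8 input channels, top_slice_axis = 3 output channels starting at offset_slice_axis = 2,
// and slice_size = H*W, output element (n, c, h, w) is read from input channel 2 + c of sample n, i.e.
// bottom_index = slice_index + (n * 8 + 2) * H * W.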
int SLICEPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace, hipStream_t stream) {
int slice_size = 1;
for (size_t index = axis_ + 1; index < input_dims_.nbDims; index++) {
slice_size *= input_dims_.d[index];
}
int num_slices = batchSize;
for (size_t index = 0; index < axis_; index++) {
num_slices *= input_dims_.d[index];
}
int offset_slice_axis = 0;
for (int i = 0; i < out_slice_dims_.size(); i++) {
const int top_slice_axis = out_slice_dims_[i];
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
const int block_num = (nthreads + 511) / 512;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(Slice, dim3(block_num), dim3(512), 0, stream,
    nthreads, (const float *)(inputs[0]), num_slices, slice_size,
    input_dims_.d[axis_], top_slice_axis, offset_slice_axis,
    reinterpret_cast<float *>(outputs[i]));
offset_slice_axis += top_slice_axis;
}
return 1;
}
} // namespace inference
} // namespace perception
} // namespace apollo
| c88fe7e4935122ac6dfbd66b4cd8266a09dbd6ba.cu | /******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <vector>
#include "modules/perception/inference/tensorrt/plugins/slice_plugin.h"
namespace apollo {
namespace perception {
namespace inference {
typedef int8_t int8;
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype *in_data,
const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype *out_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index =
slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
out_data[index] = in_data[bottom_index];
}
}
int SLICEPlugin::enqueue(int batchSize, const void *const *inputs,
void **outputs, void *workspace, cudaStream_t stream) {
int slice_size = 1;
for (size_t index = axis_ + 1; index < input_dims_.nbDims; index++) {
slice_size *= input_dims_.d[index];
}
int num_slices = batchSize;
for (size_t index = 0; index < axis_; index++) {
num_slices *= input_dims_.d[index];
}
int offset_slice_axis = 0;
for (int i = 0; i < out_slice_dims_.size(); i++) {
const int top_slice_axis = out_slice_dims_[i];
const int top_slice_size = top_slice_axis * slice_size;
const int nthreads = top_slice_size * num_slices;
const int block_num = (nthreads + 511) / 512;
Slice // NOLINT_NEXT_LINE(whitespace/operators)
<<<block_num, 512, 0, stream>>>(
nthreads, (const float *)(inputs[0]), num_slices, slice_size,
input_dims_.d[axis_], top_slice_axis, offset_slice_axis,
reinterpret_cast<float *>(outputs[i]));
offset_slice_axis += top_slice_axis;
}
return 1;
}
} // namespace inference
} // namespace perception
} // namespace apollo
|
1f669a58714e456ca1ebfdd4d0263e2a9e6df6d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GPU_KERNEL
#ifdef MULTIPLE_FILE
#include <bench_gtc.h>
#include <cutil.h>
//#include <papi.h>
extern __device__ __constant__ real temp[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real dtemp[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real rtemi[MAX_MPSI] __align__ (16);
//extern __device__ __constant__ real pfluxpsi[MFLUX] __align__ (16);
extern __device__ gtc_global_params_t params __align__ (16);
extern __device__ __constant__ real qtinv[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real delt[MAX_MPSI] __align__ (16);
extern __device__ __constant__ int igrid[MAX_MPSI] __align__ (16);
extern __device__ __constant__ int mtheta[MAX_MPSI] __align__ (16);
extern __device__ gtc_radial_decomp_t radial_decomp __align__ (16);
//extern __device__ __constant__ int igrid_in __align__ (16);
//extern __device__ __constant__ int ipsi_in __align__ (16);
//extern __device__ __constant__ int ipsi_out __align__ (16);
#if SINGLE_PRECISION
extern texture<float, 1, hipReadModeElementType> evectorTexRef;
#else
extern texture<int2, 1, hipReadModeElementType> evectorTexRef;
#endif
#endif
#if OPTIMIZE_ACCESS
#if USE_TEXTURE
static __inline__ __device__ real fetch_evector(int i)
{
int2 e = tex1Dfetch(evectorTexRef,i);
return __hiloint2double(e.y,e.x);
}
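// The field array is bound to an int2 texture because double-precision textures are not supported directly;
// each fetch returns the two 32-bit halves of the double, which __hiloint2double() reassembles.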
#define EVECTOR(i) (fetch_evector(i))
#else
#define EVECTOR(i) (evector[i])
#endif //USE_TEXTURE
#else
#define EVECTOR(i) (evector[i])
#endif // OPTIMIZE_ACCESS
__global__ static void
__launch_bounds__(THREAD_BLOCK/*maxThreadsPerBlock*/, 1/*minBlocksPerMultiprocessor*/)
gpu_push_gyro_interpolation(gtc_particle_data_t *zion, gtc_aux_particle_point_t *point, gtc_field_data_t *grid, gtc_diagnosis_data_t *diagnosis, int irk, int istep, int idiag)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int nblocks = gridDim.x;
const int nthreads = blockDim.x;
const int gid = tid + bid*nthreads;
const int np = nblocks * nthreads;
const int mflux = MFLUX;
extern __shared__ real shared_buffer_gyro[];
real *vdrtmp = shared_buffer_gyro;
// data for diagnosis
real *scalar_data_s = &vdrtmp[5];
// real *eflux_s = &scalar_data_s[6*nthreads];
// real *rmarker_s = &eflux_s[mflux*nthreads];
real *flux_s = &scalar_data_s[7*nthreads];
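// Shared-memory layout per block, inferred from the initialization loops below: 5 reals for vdrtmp (MFLUX
// flux-surface values), then 7*nthreads partial sums for the scalar diagnostics, then 4*mflux*nthreads partial
// sums for the eflux / rmarker / dmark / dden flux diagnostics that are reduced at the end of the kernel.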
if (idiag==0){
for (int i=tid; i<7*nthreads; i+=nthreads){
scalar_data_s[i] = 0.0;
}
for (int i=tid; i<4*mflux*nthreads; i+=nthreads){
// eflux_s[i] = 0.0;
// rmarker_s[i] = 0.0;
flux_s[i] = 0.0;
}
__syncthreads();
}
int mi = params.mi; int mimax=params.mimax;
int mpsi = params.mpsi;
const real a = params.a;
const real a0 = params.a0;
const real a1 = params.a1;
const real delr = params.delr;
const real pi2 = 2.0*params.pi;
const real pi2_inv = params.pi2_inv;
const int nbound = params.nbound;
const real gyroradius = params.gyroradius;
const real qion = params.qion;
const real aion = params.aion;
const real tstep =params.tstep;
const real nonlinear = params.nonlinear;
const real paranl = params.paranl;
const real* __restrict__ point_vect = point->point_vect;
const int* __restrict__ point_index = point->point_index;
real* __restrict__ scalar_data = diagnosis->scalar_data;
real* __restrict__ flux_data = diagnosis->flux_data;
/*
real* __restrict__ eflux = diagnosis->eflux;
real* __restrict__ rmarker = diagnosis->rmarker;
real* __restrict__ dmark = diagnosis->dmark;
real* __restrict__ dden = diagnosis->dden;
*/
#if !ASSUME_MZETA_EQUALS1
const int mzeta = params.mzeta;
#endif
#if !USE_TEXTURE
const real * __restrict__ evector = grid->evector;
#endif
const real * __restrict__ pfluxpsi = grid->pfluxpsi;
real * __restrict__ zion1 = zion->z0;
real * __restrict__ zion2 = zion->z1;
real * __restrict__ zion3 = zion->z2;
real * __restrict__ zion4 = zion->z3;
real * __restrict__ zion5 = zion->z4;
real * __restrict__ zion6 = zion->z5;
real * __restrict__ zion01 = zion->z00;
real * __restrict__ zion02 = zion->z01;
real * __restrict__ zion03 = zion->z02;
real * __restrict__ zion04 = zion->z03;
real * __restrict__ zion05 = zion->z04;
real * __restrict__ zion06 = zion->z05;
real zion1m, zion2m, zion3m, zion4m, zion5m, zion6m;
real wpi1, wpi2, wpi3;
real dtime;
real sbound=1.0;
if (nbound==0) sbound = 0.0;
real psimax=0.5*a1*a1;
real psimin=0.5*a0*a0;
real cmratio=qion/aion;
real cinv=1.0/qion;
real vthi=gyroradius*fabs(qion)/aion;
real d_inv=real(mflux)/(a1-a0);
real zion01m, zion02m, zion03m, zion04m, zion05m;
for (int m=gid; m<mi; m+=np){
if(irk==1) {
dtime=0.5*tstep;
if(tid<mflux)
vdrtmp[tid] = 0.0;
//if(istep ==1) {
#if PTX_STREAM_INTRINSICS
TunedLoad<real,CS>::Ld(zion1m,zion1+m,0);
TunedLoad<real,CS>::Ld(zion2m,zion2+m,0);
TunedLoad<real,CS>::Ld(zion3m,zion3+m,0);
TunedLoad<real,CS>::Ld(zion4m,zion4+m,0);
TunedLoad<real,CS>::Ld(zion5m,zion5+m,0);
TunedStore<real,CS>::St(zion1m,zion01+m,0);
TunedStore<real,CS>::St(zion2m,zion02+m,0);
TunedStore<real,CS>::St(zion3m,zion03+m,0);
TunedStore<real,CS>::St(zion4m,zion04+m,0);
TunedStore<real,CS>::St(zion5m,zion05+m,0);
#else
zion01[m] = zion1[m];
zion02[m] = zion2[m];
zion03[m] = zion3[m];
zion04[m] = zion4[m];
zion05[m] = zion5[m];
#endif
//}
} else {
dtime=tstep;
if(nonlinear<0.5) {
printf("Error! decoupling modes for "
"nonlinear = 0.0 not implemented\n");
if(tid<mflux)
vdrtmp[tid] = 0.0;
} else {
if(tid<mflux)
vdrtmp[tid] = pfluxpsi[tid];
}
}
__syncthreads();
#if PTX_STREAM_INTRINSICS
//if((istep != 1)||(irk!=1)) {
if (irk!=1){
TunedLoad<real,CS>::Ld(zion1m,zion1+m,0);
TunedLoad<real,CS>::Ld(zion2m,zion2+m,0);
TunedLoad<real,CS>::Ld(zion3m,zion3+m,0);
TunedLoad<real,CS>::Ld(zion4m,zion4+m,0);
TunedLoad<real,CS>::Ld(zion5m,zion5+m,0);
}
TunedLoad<real,CS>::Ld(zion6m,zion6+m,0);
#else
zion1m = zion1[m];
zion2m = zion2[m];
zion3m = zion3[m];
zion4m = zion4[m];
zion5m = zion5[m];
zion6m = zion6[m];
#endif
wpi1 = wpi2 = wpi3 = 0.0;
int index;
for (int larmor=0; larmor<4; larmor++){
index = point_index[larmor*mi+m];
#ifdef _DEBUG_GPU
if (index>=4*EXTRA_BUFFER*mimax){
printf("index>EXTRA_BUFFER in push\n");
printf("index=%d mimax=%d 4*EXTRA_BUFFER*mimax=%d\n", index, mimax, 4*EXTRA_BUFFER*mimax);
CudaAssert(index<(4*EXTRA_BUFFER*mimax));
}
#endif
// if (index!=-1){
wpi1 += point_vect[4*index];
wpi2 += point_vect[4*index+1];
wpi3 += point_vect[4*index+2];
// }
}
wpi1 = 0.25*wpi1; wpi2 = 0.25*wpi2; wpi3 = 0.25*wpi3;
if(irk ==1){
zion01m = zion1m;
zion02m = zion2m;
zion03m = zion3m;
zion04m = zion4m;
zion05m = zion5m;
} else {
#if PTX_STREAM_INTRINSICS
TunedLoad<real,CS>::Ld(zion01m,zion01+m,0);
TunedLoad<real,CS>::Ld(zion02m,zion02+m,0);
TunedLoad<real,CS>::Ld(zion03m,zion03+m,0);
TunedLoad<real,CS>::Ld(zion04m,zion04+m,0);
TunedLoad<real,CS>::Ld(zion05m,zion05+m,0);
#else
zion01m = zion01[m];
zion02m = zion02[m];
zion03m = zion03[m];
zion04m = zion04[m];
zion05m = zion05[m];
#endif
}
// primary ion marker temperature and parallel flow velocity
real ainv=1.0/a;
/* update GC position */
//#if !ONTHEFLY_PUSHAUX
#if SQRT_PRECOMPUTED
real r = zion1m;
#else
real r=sqrt(2.0*zion1m);
#endif
//#endif
real rinv=1.0/r;
const real q0 = params.q0;
const real q1 = params.q1;
const real q2 = params.q2;
const real rw = params.rw;
const real rc = params.rc;
int ii=d_abs_min_int(mpsi-1,int((r-a0)*delr));
int ip=d_abs_min_int(mflux-1,1+int((r-a0)*d_inv));
real wp0=real(ii+1)-(r-a0)*delr;
real wp1=1.0-wp0;
real tem=wp0*temp[ii]+wp1*temp[ii+1];
real q=q0+q1*r*ainv+q2*r*r*ainv*ainv;
real qinv=1.0/q;
real cost=cos(zion2m);
real sint=sin(zion2m);
real b=1.0/(1.0+r*cost);
real g=1.0;
real gp=0.0;
real ri=0.0;
real rip=0.0;
real dbdp=-1.0*b*b*cost*rinv;
real dbdt=b*b*r*sint;
real dedb=cinv*(zion4m*zion4m*qion*b*cmratio+zion6m*zion6m);
real deni=1.0/(g*q + ri + zion4m*(g*rip-ri*gp));
real upara=zion4m*b*cmratio;
real energy=0.5*aion*upara*upara+zion6m*zion6m*b;
real rfac=rw*(r-rc);
#if PROFILE_SHAPE==0
rfac=rfac*rfac;
rfac=rfac*rfac*rfac;
rfac=exp(-1*rfac);
#elif PROFILE_SHAPE==1
rfac=tanh(rfac)*tanh(rfac);
rfac=1.0-rfac;
#endif
real kappa=1.0-sbound+sbound*rfac;
const real kappati = params.kappati;
const real kappan = params.kappan;
kappa=((energy*tem-1.5)*kappati+kappan)*kappa*rinv;
// perturbed quantities
real dptdp=wpi1;
real dptdt=wpi2;
real dptdz=wpi3-wpi2*qinv;
real epara=-1.0*wpi3*b*q*deni;
// subtract net particle flow
dptdt=dptdt+vdrtmp[ip];
// ExB drift in radial direction for w-dot and flux diagnostics
real vdr=q*(ri*dptdz-g*dptdt)*deni;
real wdrive=vdr*kappa;
real wpara=epara*(upara-dtemp[ii])*qion*tem;
// Common subexpression elimination
real wdrift=q*(g*dbdt*dptdp-g*dbdp*dptdt+ri*dbdp*dptdz)*deni*dedb*qion*tem;
real wdot=(zion06[m]-paranl*zion5m)*(wdrive+wpara+wdrift);
// self-consistent and external electric field for marker orbits
const real flow0 = params.flow0;
const real flow1 = params.flow1;
const real flow2 = params.flow2;
dptdp=dptdp*nonlinear+gyroradius*(flow0+flow1*r*ainv+flow2*r*r*ainv*ainv);
dptdt=dptdt*nonlinear;
dptdz=dptdz*nonlinear;
// particle velocity
real pdot = q*(-g*dedb*dbdt - g*dptdt + ri*dptdz)*deni;
real tdot = (upara*b*(1.0-q*gp*zion4m) + q*g*(dedb*dbdp + dptdp))*deni;
real zdot = (upara*b*q*(1.0+rip*zion4m) - q*ri*(dedb*dbdp + dptdp))*deni;
real rdot = ((gp*zion4m-1.0)*(dedb*dbdt + paranl*dptdt)-paranl*q*(1.0+rip*zion4m)*dptdz)*deni;
// update particle position
#if PTX_STREAM_INTRINSICS
#if SQRT_PRECOMPUTED
zion1m = max(1.0e-8*psimax,0.5*zion01m*zion01m + dtime*pdot);
zion1m = sqrt(2.0*zion1m);
#else
zion1m = max(1.0e-8*psimax,zion01m+dtime*pdot);
#endif
TunedStore<real,CS>::St(zion1m,zion1+m,0);
zion2m = zion02m+dtime*tdot;
zion3m = zion03m+dtime*zdot;
zion4m = zion04m + dtime*rdot;
TunedStore<real,CS>::St(zion4m,zion4+m,0);
zion5m = zion05m + dtime*wdot;
TunedStore<real,CS>::St(zion5m,zion5+m,0);
real z1t = zion2m *pi2_inv+10;
zion2m = pi2*(z1t-((int)z1t));
TunedStore<real,CS>::St(zion2m,zion2+m,0);
z1t = zion3m*pi2_inv+10;
zion3m = pi2*(z1t - ((int)z1t));
TunedStore<real,CS>::St(zion3m,zion3+m,0);
if(irk==2) {
#if SQRT_PRECOMPUTED
if((zion1m > a1)||(zion1m < a0)) {
#else
if((zion1m > psimax)||(zion1m < psimin)) {
#endif
TunedStore<real,CS>::St(zion01m,zion1+m,0);
TunedStore<real,CS>::St(pi2-zion02m,zion2+m,0);
TunedStore<real,CS>::St(zion03m,zion3+m,0);
TunedStore<real,CS>::St(zion04m,zion4+m,0);
TunedStore<real,CS>::St(zion05m,zion5+m,0);
TunedStore<real,CS>::St(pi2-zion02m,zion02+m,0);
} /*else {
TunedStore<real,CS>::St(zion1m,zion01+m,0);
TunedStore<real,CS>::St(zion2m,zion02+m,0);
TunedStore<real,CS>::St(zion3m,zion03+m,0);
TunedStore<real,CS>::St(zion4m,zion04+m,0);
TunedStore<real,CS>::St(zion5m,zion05+m,0);
}*/
}
#else
#if SQRT_PRECOMPUTED
zion1m = max(1.0e-8*psimax,0.5*zion01m*zion01m + dtime*pdot);
zion1m = sqrt(2.0*zion1m);
#else
zion1m = max(1.0e-8*psimax,zion01m+dtime*pdot);
#endif
zion2m = zion02m+dtime*tdot;
zion3m = zion03m+dtime*zdot;
zion4[m] = zion04m+dtime*rdot;
zion5[m] = zion05m+dtime*wdot;
// theta and zeta normalize to [0,2*pi), modulo is slower than hand coded
// procedure on Seaborg. However, modulo works better and is preferable.
real z1t = zion2m *pi2_inv+10;
zion2[m]=pi2*(z1t-((int)z1t));
z1t = zion3m*pi2_inv+10;
zion3[m]=pi2*(z1t - ((int)z1t));
if(irk==2) {
#if SQRT_PRECOMPUTED
if(zion1[m] > a1) {
#else
if(zion1[m] > psimax) {
#endif
zion1[m]=zion01m;
zion2[m]=pi2-zion02m;
zion3[m]=zion03m;
zion4[m]=zion04m;
zion5[m]=zion05m;
#if SQRT_PRECOMPUTED
} else if(zion1[m] < a0) {
#else
} else if (zion1[m] < psimin) {
#endif
zion1[m]=zion01m;
zion2[m]=pi2-zion02m;
zion3[m]=zion03m;
zion4[m]=zion04m;
zion5[m]=zion05m;
}
/*
zion01[m] = zion1[m];
zion02[m] = zion2[m];
zion03[m] = zion3[m];
zion04[m] = zion4[m];
zion05[m] = zion5[m];
*/
}
#endif
if (idiag==0){
ip = d_abs_min_int(mflux-1, (int)((r-a0)*d_inv));
// ii = d_abs_min_int(mpsi, (int)((r-a0)*delr+0.5));
real vdrenergy = vdr*rinv*(energy-1.5*aion*vthi*vthi*rtemi[ii])*zion05m;
// rmarker_s[ip*nthreads+tid] += zion06[m];
// eflux_s[ip*nthreads+tid] += vdrenergy;
flux_s[ip*nthreads+tid] += vdrenergy; // eflux
flux_s[mflux*nthreads+ip*nthreads+tid] += zion06[m]; //rmarker
flux_s[2*mflux*nthreads + ip*nthreads+tid] += vdr*rinv*r; // dmark
flux_s[3*mflux*nthreads + ip*nthreads+tid] += 1.0; //dden
scalar_data_s[0*nthreads+tid] += vdrenergy; // efluxi
scalar_data_s[1*nthreads+tid] += vdr*rinv*zion05m; // pfluxi
scalar_data_s[2*nthreads+tid] += b*zion04m*zion05m; // dflowi
scalar_data_s[3*nthreads+tid] += zion05m*zion05m; // entropyi
scalar_data_s[4*nthreads+tid] += energy*zion05m; // particles_energy[0]
scalar_data_s[5*nthreads+tid] += energy; // particles_energy[1]
scalar_data_s[6*nthreads+tid] += zion05m;
}
} // end m=gid
__syncthreads();
if (idiag==0){
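    // Block-level reduction of the per-thread diagnostics: a shared-memory
    // tree reduction halves the number of active threads each pass (this
    // assumes nthreads is a power of two), after which thread 0 and the
    // first 20 threads commit one atomic update per quantity to global memory.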
int nTotalThreads = nthreads;
while (nTotalThreads>1){
int half = (nTotalThreads >> 1);
if (tid < half){
for (int i=0; i<7; i++){
scalar_data_s[i*nthreads+tid] += scalar_data_s[i*nthreads+tid+half];
}
for (int i=0; i<mflux; i++)
{
//eflux_s[i*nthreads+tid] += eflux_s[i*nthreads+tid+half];
//rmarker_s[i*nthreads+tid] += rmarker_s[i*nthreads+tid+half];
flux_s[i*nthreads+tid] += flux_s[i*nthreads+tid+half];
flux_s[mflux*nthreads+i*nthreads+tid] += flux_s[mflux*nthreads+i*nthreads+tid+half];
flux_s[2*mflux*nthreads+i*nthreads+tid] += flux_s[2*mflux*nthreads+i*nthreads+tid+half];
flux_s[3*mflux*nthreads+i*nthreads+tid] += flux_s[3*mflux*nthreads+i*nthreads+tid+half];
}
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (tid==0){
atomicDPupdate(scalar_data, scalar_data_s[0]);
atomicDPupdate(scalar_data+2, scalar_data_s[nthreads]);
atomicDPupdate(scalar_data+6, scalar_data_s[2*nthreads]);
atomicDPupdate(scalar_data+8, scalar_data_s[3*nthreads]);
atomicDPupdate(scalar_data+12, scalar_data_s[4*nthreads]);
atomicDPupdate(scalar_data+13, scalar_data_s[5*nthreads]);
atomicDPupdate(scalar_data+15, scalar_data_s[6*nthreads]);
}
/*
if (tid<5)
//atomicDPupdate(eflux+tid,eflux_s[tid*nthreads]);
atomicDPupdate(eflux+tid, flux_s[tid*nthreads]);
if (tid>=5&&tid<10)
//atomicDPupdate(rmarker+tid-5,rmarker_s[(tid-5)*nthreads]);
atomicDPupdate(rmarker+tid-5, flux_s[mflux*nthreads+(tid-5)*nthreads]);
if (tid>=10&&tid<15)
atomicDPupdate(dmark+tid-10, flux_s[2*mflux*nthreads+(tid-10)*nthreads]);
if (tid>=15&&tid<20)
atomicDPupdate(dden+tid-15, flux_s[3*mflux*nthreads+(tid-15)*nthreads]);
*/
if (tid<5)
//atomicDPupdate(eflux+tid,eflux_s[tid*nthreads]);
atomicDPupdate(flux_data+tid, flux_s[tid*nthreads]);
if (tid>=5&&tid<10)
//atomicDPupdate(rmarker+tid-5,rmarker_s[(tid-5)*nthreads]);
atomicDPupdate(flux_data+tid, flux_s[mflux*nthreads+(tid-5)*nthreads]);
if (tid>=10&&tid<15)
atomicDPupdate(flux_data+tid, flux_s[2*mflux*nthreads+(tid-10)*nthreads]);
if (tid>=15&&tid<20)
atomicDPupdate(flux_data+tid, flux_s[3*mflux*nthreads+(tid-15)*nthreads]);
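    // flux_data is assumed to be packed as four consecutive MFLUX-wide
    // segments (eflux, rmarker, dmark, dden, matching the commented-out
    // code above); the tid<5 ... tid<20 ranges rely on MFLUX == 5.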
}
}
__global__ static void gpu_push_point_interpolation(gtc_field_data_t* grid, gtc_aux_particle_point_t* point, int nloc_over_cluster, int mcellperthread)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int nthreadsx = blockDim.x;
const int nthreadsy = blockDim.y;
const int gidx=tidx+blockIdx.x*nthreadsx;
my_int* __restrict__ point_index_count;
real* __restrict__ point_vect;
int mpsi = params.mpsi;
const real pi2_inv = params.pi2_inv;
const int mzeta = params.mzeta;
real delz, zetamin, zetatmp;
int mpsi_max;
real wzt, rdum, tflr, tdumtmp, tdum;
real wtion0tmp, wtion1tmp;
int jtion0tmp, jtion1tmp, j00, j01;
real wz1, wz0;
int ij1,ij2,ij3,ij4;
int im, ii,idx1,idx2,idx3,idx4;
real wp0, wp1, wt00, wt01, wt10, wt11;
int kk;
real e1, e2, e3;
int igrid_in;
zetamin = params.zetamin;
delz = params.delz;
igrid_in = radial_decomp.igrid_in;
point_index_count = point->point_index_count;
point_vect = point->point_vect;
#if !USE_TEXTURE
const real * __restrict__ evector = grid->evector;
#endif
mpsi_max = mpsi - 1;
int maxcount = 0;
if (gidx<nloc_over_cluster)
maxcount = (int)point_index_count[gidx];
extern __shared__ int shared_buffer_sc[];
int *offset0 = shared_buffer_sc;
int *offset1 = &shared_buffer_sc[nthreadsx];
real * evector_s0 = (real *)&offset1[nthreadsx];
real * evector_s1 = &evector_s0[3*(mzeta+1)*(nthreadsx*mcellperthread+1)];
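  // Dynamic shared-memory layout: offset0 and offset1 (nthreadsx ints each)
  // hold the block's base offsets into the E-field array, followed by two
  // staging tiles, evector_s0 with 3*(mzeta+1)*(nthreadsx*mcellperthread+1)
  // reals and evector_s1 directly after it.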
// find the starting index for the array in shared memory
if (tidy==0){
offset0[tidx] = 1000000000;
offset1[tidx] = 1000000000;
if (gidx<nloc_over_cluster){
if (maxcount>0){
rdum = point_vect[4*gidx];
tflr = point_vect[4*gidx+1];
zetatmp = point_vect[4*gidx+2];
ii = d_abs_min_int(mpsi_max, (int) rdum);
im = ii;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j00 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion0tmp = igrid[im] + j00;
#ifdef _DEBUG_GPU
if ((jtion0tmp-igrid_in)/mcellperthread!=gidx){
printf("jtion0tmp=%d mcellperthread=%d gidx=%d\n", jtion0tmp, mcellperthread, gidx);
CudaAssert(jtion0tmp/mcellperthread==gidx);
}
#endif
im = ii + 1;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j01 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion1tmp = igrid[im] + j01;
offset0[tidx] = 3*(mzeta+1)*gidx*mcellperthread;
if (gidx==(igrid[ii+1]-2-igrid_in)/mcellperthread||gidx==(igrid[ii+1]-3-igrid_in)/mcellperthread){
offset1[tidx] = 1000000000;
}
else{
offset1[tidx] = 3*((mzeta+1)*(jtion1tmp-igrid_in) - 16);
offset1[tidx] -= 3*(mzeta+1)*(mcellperthread-1);
}
}
}
}
__syncthreads();
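  // Block-wide minimum reduction over offset0/offset1, done by the tidy==0
  // row of threads, so every thread ends up with the same base offsets;
  // 1000000000 serves as the "no data" sentinel value.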
int nTotalThreads = nthreadsx;
while (nTotalThreads>1){
int half = (nTotalThreads >> 1);
if (tidy==0){
if (tidx < half){
int temp0 = offset0[tidx+half];
if (temp0<offset0[tidx]) offset0[tidx]=temp0;
int temp1 = offset1[tidx+half];
if (temp1<offset1[tidx]) offset1[tidx]=temp1;
}
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (tidy==0){
offset0[tidx] = offset0[0];
offset1[tidx] = offset1[0];
}
__syncthreads();
// copy E field from global or texture to shared memory
for (int ij=tidx; ij<nthreadsx*mcellperthread+1; ij+=nthreadsx){
if (offset1[tidx]!=1000000000){
int ij_off0 = 3*(mzeta+1)*ij+offset0[tidx];
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s0[ij*3*(mzeta+1)+ii] = EVECTOR(ij_off0+ii);
}
}
else{
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s0[ij*3*(mzeta+1)+ii] = 0.0;
}
}
}
for (int ij=tidx; ij<1.5*nthreadsx*mcellperthread; ij+=nthreadsx){
if (offset1[tidx]!=1000000000){
int ij_off1 = 3*(mzeta+1)*ij+offset1[tidx];
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s1[ij*3*(mzeta+1)+ii] = EVECTOR(ij_off1+ii);
}
}
else{
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s1[ij*3*(mzeta+1)+ii] = 0.0;
}
}
}
__syncthreads();
// field interpolation from shared memory
for (int m=tidy*nloc_over_cluster+gidx, iter=0; iter<maxcount; iter+=blockDim.y, m+=nloc_over_cluster*blockDim.y){
if (iter+tidy<maxcount){
e1 = 0.0;
e2 = 0.0;
e3 = 0.0;
rdum = point_vect[4*m];
tflr = point_vect[4*m+1];
zetatmp = point_vect[4*m+2];
wzt = (zetatmp-zetamin)*delz;
kk = d_abs_min_int(mzeta-1, (int) wzt);
wz1 = wzt - (real) kk;
wz0 = 1.0 - wz1;
ii = d_abs_min_int(mpsi_max, (int) rdum);
wp1 = rdum - (real) ii;
wp0 = 1.0 - wp1;
im = ii;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j00 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion0tmp = igrid[im] + j00;
wtion0tmp = tdum - (real) j00;
im = ii + 1;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j01 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion1tmp = igrid[im] + j01;
wtion1tmp = tdum - (real) j01;
#ifdef _DEBUG_GPU
if ((jtion0tmp-igrid_in)/mcellperthread!=gidx){
printf("jtion0tmp=%d mcellperthread=%d gidx=%d\n", jtion0tmp, mcellperthread, gidx);
CudaAssert(jtion0tmp/mcellperthread==gidx);
}
#endif
wt10 = wtion0tmp;
wt00 = 1.0 - wt10;
wt11 = wtion1tmp;
wt01 = 1.0 - wt11;
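      // Linear interpolation weights: (wp0,wp1) in the radial direction,
      // (wt00,wt10) and (wt01,wt11) along theta on the inner and outer flux
      // surfaces, and (wz0,wz1) along zeta; the field components are then
      // gathered from the corresponding grid points below.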
ij1 = jtion0tmp - igrid_in;
ij3 = jtion1tmp - igrid_in;
ij2 = ij1 + 1;
ij4 = ij3 + 1;
#if ASSUME_MZETA_EQUALS1
idx1 = 6*ij1;
// idx2 = 6*ij2;
idx3 = 6*ij3;
// idx4 = 6*ij4;
idx1 = idx1 - offset0[tidx];
#ifdef _DEBUG_GPU
if (idx1<0||idx1>=3*(mzeta+1)*nthreadsx*mcellperthread)
printf("jtion0tmp=%d gidx=%d idx1=%d offset0=%d\n", jtion0tmp, gidx, idx1, offset0[tidx \
]);
CudaAssert(idx1>=0);
CudaAssert(idx1<3*(mzeta+1)*nthreadsx*mcellperthread);
#endif
e1 = e1+wp0*wt00*(wz0*evector_s0[idx1+0]+wz1*evector_s0[idx1+3]);
e2 = e2+wp0*wt00*(wz0*evector_s0[idx1+1]+wz1*evector_s0[idx1+4]);
e3 = e3+wp0*wt00*(wz0*evector_s0[idx1+2]+wz1*evector_s0[idx1+5]);
e1 = e1+wp0*wt10*(wz0*evector_s0[idx1+6+0]+wz1*evector_s0[idx1+6+3]);
e2 = e2+wp0*wt10*(wz0*evector_s0[idx1+6+1]+wz1*evector_s0[idx1+6+4]);
e3 = e3+wp0*wt10*(wz0*evector_s0[idx1+6+2]+wz1*evector_s0[idx1+6+5]);
idx3 = idx3 - offset1[tidx];
if (idx3<0||idx3>=3*(mzeta+1)*(1.5*nthreadsx*mcellperthread-1)){
idx3 = idx3 + offset1[tidx];
e1 = e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 = e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 = e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 = e1+wp1*wt11*(wz0*EVECTOR(idx3+6+0)+wz1*EVECTOR(idx3+6+3));
e2 = e2+wp1*wt11*(wz0*EVECTOR(idx3+6+1)+wz1*EVECTOR(idx3+6+4));
e3 = e3+wp1*wt11*(wz0*EVECTOR(idx3+6+2)+wz1*EVECTOR(idx3+6+5));
}
else {
e1 = e1+wp1*wt01*(wz0*evector_s1[idx3+0]+wz1*evector_s1[idx3+3]);
e2 = e2+wp1*wt01*(wz0*evector_s1[idx3+1]+wz1*evector_s1[idx3+4]);
e3 = e3+wp1*wt01*(wz0*evector_s1[idx3+2]+wz1*evector_s1[idx3+5]);
e1 = e1+wp1*wt11*(wz0*evector_s1[idx3+6+0]+wz1*evector_s1[idx3+6+3]);
e2 = e2+wp1*wt11*(wz0*evector_s1[idx3+6+1]+wz1*evector_s1[idx3+6+4]);
e3 = e3+wp1*wt11*(wz0*evector_s1[idx3+6+2]+wz1*evector_s1[idx3+6+5]);
}
/*
// debug
e1 =e1+wp0*wt00*(wz0*EVECTOR(idx1+0)+wz1*EVECTOR(idx1+3));
e2 =e2+wp0*wt00*(wz0*EVECTOR(idx1+1)+wz1*EVECTOR(idx1+4));
e3 =e3+wp0*wt00*(wz0*EVECTOR(idx1+2)+wz1*EVECTOR(idx1+5));
e1 =e1+wp0*wt10*(wz0*EVECTOR(idx1+6+0)+wz1*EVECTOR(idx1+6+3));
e2 =e2+wp0*wt10*(wz0*EVECTOR(idx1+6+1)+wz1*EVECTOR(idx1+6+4));
e3 =e3+wp0*wt10*(wz0*EVECTOR(idx1+6+2)+wz1*EVECTOR(idx1+6+5));
e1 =e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 =e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 =e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 =e1+wp1*wt11*(wz0*EVECTOR(idx3+6+0)+wz1*EVECTOR(idx3+6+3));
e2 =e2+wp1*wt11*(wz0*EVECTOR(idx3+6+1)+wz1*EVECTOR(idx3+6+4));
e3 =e3+wp1*wt11*(wz0*EVECTOR(idx3+6+2)+wz1*EVECTOR(idx3+6+5));
*/
#else
idx1 = 3*(mzeta+1)*ij1+3*kk;
idx2 = 3*(mzeta+1)*ij2+3*kk;
idx3 = 3*(mzeta+1)*ij3+3*kk;
idx4 = 3*(mzeta+1)*ij4+3*kk;
idx1 = idx1 - offset0[tidx];
idx2 = idx2 - offset0[tidx];
idx3 = idx3 - offset1[tidx];
idx4 = idx4 - offset1[tidx];
e1 = e1+wp0*wt00*(wz0*evector_s0[idx1+0]+wz1*evector_s0[idx1+3]);
e2 = e2+wp0*wt00*(wz0*evector_s0[idx1+1]+wz1*evector_s0[idx1+4]);
e3 = e3+wp0*wt00*(wz0*evector_s0[idx1+2]+wz1*evector_s0[idx1+5]);
e1 = e1+wp0*wt10*(wz0*evector_s0[idx2+0]+wz1*evector_s0[idx2+3]);
e2 = e2+wp0*wt10*(wz0*evector_s0[idx2+1]+wz1*evector_s0[idx2+4]);
e3 = e3+wp0*wt10*(wz0*evector_s0[idx2+2]+wz1*evector_s0[idx2+5]);
if (idx3<0||idx3>=3*(mzeta+1)*(1.5*nthreadsx*mcellperthread-1)){
e1 = e1+wp1*wt01*(wz0*evector_s1[idx3+0]+wz1*evector_s1[idx3+3]);
e2 = e2+wp1*wt01*(wz0*evector_s1[idx3+1]+wz1*evector_s1[idx3+4]);
e3 = e3+wp1*wt01*(wz0*evector_s1[idx3+2]+wz1*evector_s1[idx3+5]);
e1 = e1+wp1*wt11*(wz0*evector_s1[idx4+0]+wz1*evector_s1[idx4+3]);
e2 = e2+wp1*wt11*(wz0*evector_s1[idx4+1]+wz1*evector_s1[idx4+4]);
e3 = e3+wp1*wt11*(wz0*evector_s1[idx4+2]+wz1*evector_s1[idx4+5]);
}
else{
idx3 = idx3 + offset1[tidx];
idx4 = idx4 + offset1[tidx];
e1 = e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 = e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 = e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 = e1+wp1*wt11*(wz0*EVECTOR(idx4+0)+wz1*EVECTOR(idx4+3));
e2 = e2+wp1*wt11*(wz0*EVECTOR(idx4+1)+wz1*EVECTOR(idx4+4));
e3 = e3+wp1*wt11*(wz0*EVECTOR(idx4+2)+wz1*EVECTOR(idx4+5));
}
#endif
point_vect[4*m] = e1;
point_vect[4*m+1] = e2;
point_vect[4*m+2] = e3;
}
}
}
extern "C"
void call_gpu_push_4p_kernel(gtc_bench_data_t *gtc_input, gpu_kernel_args_t* gpu_kernel_input, int idiag){
gtc_global_params_t *h_params = &(gtc_input->global_params);
gtc_field_data_t *h_grid = &(gtc_input->field_data);
gtc_field_data_t *d_grid = &(gpu_kernel_input->d_grid);
gtc_diagnosis_data_t *h_diagnosis = &(gtc_input->diagnosis_data);
gtc_diagnosis_data_t *d_diagnosis = &(gpu_kernel_input->d_diagnosis);
gtc_radial_decomp_t *h_radial_decomp = &(gtc_input->radial_decomp);
int mzeta = h_params->mzeta; //int mgrid = h_params->mgrid; int mpsi = h_params->mpsi;
int mi = h_params->mi; int mype = gtc_input->parallel_decomp.mype;
int nloc_over = h_radial_decomp->nloc_over;
// int mgrid_cluster = (mgrid-h_grid->mtheta[mpsi]+CLUSTER_SIZE-1)/CLUSTER_SIZE;
int nloc_over_cluster = (nloc_over+CLUSTER_SIZE-1)/CLUSTER_SIZE;
/************** copy E field to GPU *****************/
gpu_timer_start(gpu_kernel_input);
//modified by cxx 2017/5 8 10:50
// CUDA_SAFE_CALL(hipMemcpy((void *)d_grid->evector, h_grid->evector, 3*(mzeta+1)*nloc_over*sizeof(real) , hipMemcpyHostToDevice));
memcpy(d_grid->evector, h_grid->evector, 3*(mzeta+1)*nloc_over*sizeof(real));
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(hipMemcpy((void *)d_grid->pfluxpsi, h_grid->pfluxpsi, MFLUX*sizeof(real), hipMemcpyHostToDevice));
memcpy(d_grid->pfluxpsi, h_grid->pfluxpsi, MFLUX*sizeof(real));
/**************** copy and reset diagnosis data****************/
if (idiag==0){
// notice: host-device memory copy of 64KB or less is NOT synchronous
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(hipMemcpy((void*)(d_diagnosis->scalar_data), (h_diagnosis->scalar_data), 16*sizeof(real),hipMemcpyHostToDevice));
memcpy((d_diagnosis->scalar_data), (h_diagnosis->scalar_data), 16*sizeof(real));
// CUDA_SAFE_CALL(hipMemset(d_diagnosis->eflux, 0, 4*MFLUX*sizeof(real)));
CUDA_SAFE_CALL(hipMemset(d_diagnosis->flux_data, 0, 4*MFLUX*sizeof(real)));
}
gpu_kernel_input->gpu_timing.memtransfer_push_time += gpu_timer_measure(gpu_kernel_input);
/************** interpolate grid-based E to point-based E************/
int mcellperthread=CLUSTER_SIZE;
int nthreadsx = 32; int nthreadsy=8;
dim3 nthread_3d(nthreadsx, nthreadsy);
int nblocks = (nloc_over_cluster + nthreadsx - 1)/nthreadsx;
int shared_buffer = 2*nthreadsx*sizeof(int) + int(3*(mzeta+1)*mcellperthread*1.5*nthreadsx)*sizeof(real)+3*(mzeta+1)*(mcellperthread*nthreadsx+1)*sizeof(real);
hipLaunchKernelGGL(( gpu_push_point_interpolation), dim3(nblocks), dim3(nthread_3d), shared_buffer, 0, gpu_kernel_input->ptr_d_grid, gpu_kernel_input->ptr_d_aux_point, nloc_over_cluster, mcellperthread);
hipDeviceSynchronize();
gpu_kernel_input->gpu_timing.interpolation_push_point_time += gpu_timer_measure(gpu_kernel_input);
/*************** interpolate point-based E to gyroparticle ***************/
int mi_per_thread = gpu_kernel_input->charge_mi_per_thread;
int nthreads = gpu_kernel_input->nthreads/2;
int mp = gpu_kernel_input->deviceProp.multiProcessorCount;
int m = (mi + nthreads*mp - 1)/ (nthreads*mp);
m = (m + mi_per_thread - 1)/mi_per_thread;
nblocks = mp*m;
mi_per_thread = (mi + nblocks*nthreads - 1)/ mi_per_thread;
shared_buffer = 5*sizeof(real) + (7*nthreads+4*MFLUX*nthreads)*sizeof(real);
hipLaunchKernelGGL(( gpu_push_gyro_interpolation), dim3(nblocks), dim3(nthreads), shared_buffer, 0, gpu_kernel_input->ptr_d_zion, gpu_kernel_input->ptr_d_aux_point, gpu_kernel_input->ptr_d_grid, gpu_kernel_input->ptr_d_diagnosis, gpu_kernel_input->irk, gpu_kernel_input->istep, gpu_kernel_input->idiag);
gpu_kernel_input->gpu_timing.interpolation_push_gyro_time += gpu_timer_measure(gpu_kernel_input);
/********************* copy diagnosis data back to host********************/
if (idiag==0){
// notice: host-device memory copy of 64KB or less is NOT synchronous
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->scalar_data), (d_diagnosis->scalar_data), 16*sizeof(real),hipMemcpyDeviceToHost));
memcpy((h_diagnosis->scalar_data), (d_diagnosis->scalar_data), 16*sizeof(real));
/*
CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->eflux), (d_diagnosis->eflux), MFLUX*sizeof(real),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->rmarker), (d_diagnosis->rmarker), MFLUX*sizeof(real),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->dmark), (d_diagnosis->dmark), MFLUX*sizeof(real),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->dden), (d_diagnosis->dden), MFLUX*sizeof(real),hipMemcpyDeviceToHost));
*/
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(hipMemcpy((void*)(h_diagnosis->flux_data), (d_diagnosis->flux_data), 4*MFLUX*sizeof(real),hipMemcpyDeviceToHost));
memcpy((h_diagnosis->flux_data), (d_diagnosis->flux_data), 4*MFLUX*sizeof(real));
gpu_kernel_input->gpu_timing.memtransfer_push_time += gpu_timer_measure(gpu_kernel_input);
}
gpu_kernel_input->gpu_timing.device_push_time += gpu_timer_measure_end(gpu_kernel_input);
}
| 1f669a58714e456ca1ebfdd4d0263e2a9e6df6d3.cu | #define GPU_KERNEL
#ifdef MULTIPLE_FILE
#include <bench_gtc.h>
#include <cutil.h>
//#include <papi.h>
extern __device__ __constant__ real temp[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real dtemp[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real rtemi[MAX_MPSI] __align__ (16);
//extern __device__ __constant__ real pfluxpsi[MFLUX] __align__ (16);
extern __device__ gtc_global_params_t params __align__ (16);
extern __device__ __constant__ real qtinv[MAX_MPSI] __align__ (16);
extern __device__ __constant__ real delt[MAX_MPSI] __align__ (16);
extern __device__ __constant__ int igrid[MAX_MPSI] __align__ (16);
extern __device__ __constant__ int mtheta[MAX_MPSI] __align__ (16);
extern __device__ gtc_radial_decomp_t radial_decomp __align__ (16);
//extern __device__ __constant__ int igrid_in __align__ (16);
//extern __device__ __constant__ int ipsi_in __align__ (16);
//extern __device__ __constant__ int ipsi_out __align__ (16);
#if SINGLE_PRECISION
extern texture<float, 1, cudaReadModeElementType> evectorTexRef;
#else
extern texture<int2, 1, cudaReadModeElementType> evectorTexRef;
#endif
#endif
#if OPTIMIZE_ACCESS
#if USE_TEXTURE
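// Double-precision texture reads: the value is fetched as an int2 and
// reassembled with __hiloint2double, since CUDA textures cannot return
// doubles directly.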
static __inline__ __device__ real fetch_evector(int i)
{
int2 e = tex1Dfetch(evectorTexRef,i);
return __hiloint2double(e.y,e.x);
}
#define EVECTOR(i) (fetch_evector(i))
#else
#define EVECTOR(i) (evector[i])
#endif //USE_TEXTURE
#else
#define EVECTOR(i) (evector[i])
#endif // OPTIMIZE_ACCESS
__global__ static void
__launch_bounds__(THREAD_BLOCK/*maxThreadsPerBlock*/, 1/*minBlocksPerMultiprocessor*/)
gpu_push_gyro_interpolation(gtc_particle_data_t *zion, gtc_aux_particle_point_t *point, gtc_field_data_t *grid, gtc_diagnosis_data_t *diagnosis, int irk, int istep, int idiag)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int nblocks = gridDim.x;
const int nthreads = blockDim.x;
const int gid = tid + bid*nthreads;
const int np = nblocks * nthreads;
const int mflux = MFLUX;
extern __shared__ real shared_buffer_gyro[];
real *vdrtmp = shared_buffer_gyro;
// data for diagnosis
real *scalar_data_s = &vdrtmp[5];
// real *eflux_s = &scalar_data_s[6*nthreads];
// real *rmarker_s = &eflux_s[mflux*nthreads];
real *flux_s = &scalar_data_s[7*nthreads];
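  // Dynamic shared-memory layout for this kernel: vdrtmp (5 reals), then
  // 7*nthreads per-thread scalar diagnostics, then 4*mflux*nthreads flux
  // bins (eflux, rmarker, dmark, dden); this matches the shared_buffer
  // size computed in the launcher.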
if (idiag==0){
for (int i=tid; i<7*nthreads; i+=nthreads){
scalar_data_s[i] = 0.0;
}
for (int i=tid; i<4*mflux*nthreads; i+=nthreads){
// eflux_s[i] = 0.0;
// rmarker_s[i] = 0.0;
flux_s[i] = 0.0;
}
__syncthreads();
}
int mi = params.mi; int mimax=params.mimax;
int mpsi = params.mpsi;
const real a = params.a;
const real a0 = params.a0;
const real a1 = params.a1;
const real delr = params.delr;
const real pi2 = 2.0*params.pi;
const real pi2_inv = params.pi2_inv;
const int nbound = params.nbound;
const real gyroradius = params.gyroradius;
const real qion = params.qion;
const real aion = params.aion;
const real tstep =params.tstep;
const real nonlinear = params.nonlinear;
const real paranl = params.paranl;
const real* __restrict__ point_vect = point->point_vect;
const int* __restrict__ point_index = point->point_index;
real* __restrict__ scalar_data = diagnosis->scalar_data;
real* __restrict__ flux_data = diagnosis->flux_data;
/*
real* __restrict__ eflux = diagnosis->eflux;
real* __restrict__ rmarker = diagnosis->rmarker;
real* __restrict__ dmark = diagnosis->dmark;
real* __restrict__ dden = diagnosis->dden;
*/
#if !ASSUME_MZETA_EQUALS1
const int mzeta = params.mzeta;
#endif
#if !USE_TEXTURE
const real * __restrict__ evector = grid->evector;
#endif
const real * __restrict__ pfluxpsi = grid->pfluxpsi;
real * __restrict__ zion1 = zion->z0;
real * __restrict__ zion2 = zion->z1;
real * __restrict__ zion3 = zion->z2;
real * __restrict__ zion4 = zion->z3;
real * __restrict__ zion5 = zion->z4;
real * __restrict__ zion6 = zion->z5;
real * __restrict__ zion01 = zion->z00;
real * __restrict__ zion02 = zion->z01;
real * __restrict__ zion03 = zion->z02;
real * __restrict__ zion04 = zion->z03;
real * __restrict__ zion05 = zion->z04;
real * __restrict__ zion06 = zion->z05;
real zion1m, zion2m, zion3m, zion4m, zion5m, zion6m;
real wpi1, wpi2, wpi3;
real dtime;
real sbound=1.0;
if (nbound==0) sbound = 0.0;
real psimax=0.5*a1*a1;
real psimin=0.5*a0*a0;
real cmratio=qion/aion;
real cinv=1.0/qion;
real vthi=gyroradius*fabs(qion)/aion;
real d_inv=real(mflux)/(a1-a0);
real zion01m, zion02m, zion03m, zion04m, zion05m;
for (int m=gid; m<mi; m+=np){
if(irk==1) {
dtime=0.5*tstep;
if(tid<mflux)
vdrtmp[tid] = 0.0;
//if(istep ==1) {
#if PTX_STREAM_INTRINSICS
TunedLoad<real,CS>::Ld(zion1m,zion1+m,0);
TunedLoad<real,CS>::Ld(zion2m,zion2+m,0);
TunedLoad<real,CS>::Ld(zion3m,zion3+m,0);
TunedLoad<real,CS>::Ld(zion4m,zion4+m,0);
TunedLoad<real,CS>::Ld(zion5m,zion5+m,0);
TunedStore<real,CS>::St(zion1m,zion01+m,0);
TunedStore<real,CS>::St(zion2m,zion02+m,0);
TunedStore<real,CS>::St(zion3m,zion03+m,0);
TunedStore<real,CS>::St(zion4m,zion04+m,0);
TunedStore<real,CS>::St(zion5m,zion05+m,0);
#else
zion01[m] = zion1[m];
zion02[m] = zion2[m];
zion03[m] = zion3[m];
zion04[m] = zion4[m];
zion05[m] = zion5[m];
#endif
//}
} else {
dtime=tstep;
if(nonlinear<0.5) {
printf("Error! decoupling modes for "
"nonlinear = 0.0 not implemented\n");
if(tid<mflux)
vdrtmp[tid] = 0.0;
} else {
if(tid<mflux)
vdrtmp[tid] = pfluxpsi[tid];
}
}
__syncthreads();
#if PTX_STREAM_INTRINSICS
//if((istep != 1)||(irk!=1)) {
if (irk!=1){
TunedLoad<real,CS>::Ld(zion1m,zion1+m,0);
TunedLoad<real,CS>::Ld(zion2m,zion2+m,0);
TunedLoad<real,CS>::Ld(zion3m,zion3+m,0);
TunedLoad<real,CS>::Ld(zion4m,zion4+m,0);
TunedLoad<real,CS>::Ld(zion5m,zion5+m,0);
}
TunedLoad<real,CS>::Ld(zion6m,zion6+m,0);
#else
zion1m = zion1[m];
zion2m = zion2[m];
zion3m = zion3[m];
zion4m = zion4[m];
zion5m = zion5[m];
zion6m = zion6[m];
#endif
wpi1 = wpi2 = wpi3 = 0.0;
int index;
for (int larmor=0; larmor<4; larmor++){
index = point_index[larmor*mi+m];
#ifdef _DEBUG_GPU
if (index>=4*EXTRA_BUFFER*mimax){
printf("index>EXTRA_BUFFER in push\n");
printf("index=%d mimax=%d 4*EXTRA_BUFFER*mimax=%d\n", index, mimax, 4*EXTRA_BUFFER*mimax);
CudaAssert(index<(4*EXTRA_BUFFER*mimax));
}
#endif
// if (index!=-1){
wpi1 += point_vect[4*index];
wpi2 += point_vect[4*index+1];
wpi3 += point_vect[4*index+2];
// }
}
wpi1 = 0.25*wpi1; wpi2 = 0.25*wpi2; wpi3 = 0.25*wpi3;
if(irk ==1){
zion01m = zion1m;
zion02m = zion2m;
zion03m = zion3m;
zion04m = zion4m;
zion05m = zion5m;
} else {
#if PTX_STREAM_INTRINSICS
TunedLoad<real,CS>::Ld(zion01m,zion01+m,0);
TunedLoad<real,CS>::Ld(zion02m,zion02+m,0);
TunedLoad<real,CS>::Ld(zion03m,zion03+m,0);
TunedLoad<real,CS>::Ld(zion04m,zion04+m,0);
TunedLoad<real,CS>::Ld(zion05m,zion05+m,0);
#else
zion01m = zion01[m];
zion02m = zion02[m];
zion03m = zion03[m];
zion04m = zion04[m];
zion05m = zion05[m];
#endif
}
// primary ion marker temperature and parallel flow velocity
real ainv=1.0/a;
/* update GC position */
//#if !ONTHEFLY_PUSHAUX
#if SQRT_PRECOMPUTED
real r = zion1m;
#else
real r=sqrt(2.0*zion1m);
#endif
//#endif
real rinv=1.0/r;
const real q0 = params.q0;
const real q1 = params.q1;
const real q2 = params.q2;
const real rw = params.rw;
const real rc = params.rc;
int ii=d_abs_min_int(mpsi-1,int((r-a0)*delr));
int ip=d_abs_min_int(mflux-1,1+int((r-a0)*d_inv));
real wp0=real(ii+1)-(r-a0)*delr;
real wp1=1.0-wp0;
real tem=wp0*temp[ii]+wp1*temp[ii+1];
real q=q0+q1*r*ainv+q2*r*r*ainv*ainv;
real qinv=1.0/q;
real cost=cos(zion2m);
real sint=sin(zion2m);
real b=1.0/(1.0+r*cost);
real g=1.0;
real gp=0.0;
real ri=0.0;
real rip=0.0;
real dbdp=-1.0*b*b*cost*rinv;
real dbdt=b*b*r*sint;
real dedb=cinv*(zion4m*zion4m*qion*b*cmratio+zion6m*zion6m);
real deni=1.0/(g*q + ri + zion4m*(g*rip-ri*gp));
real upara=zion4m*b*cmratio;
real energy=0.5*aion*upara*upara+zion6m*zion6m*b;
real rfac=rw*(r-rc);
#if PROFILE_SHAPE==0
rfac=rfac*rfac;
rfac=rfac*rfac*rfac;
rfac=exp(-1*rfac);
#elif PROFILE_SHAPE==1
rfac=tanh(rfac)*tanh(rfac);
rfac=1.0-rfac;
#endif
real kappa=1.0-sbound+sbound*rfac;
const real kappati = params.kappati;
const real kappan = params.kappan;
kappa=((energy*tem-1.5)*kappati+kappan)*kappa*rinv;
// perturbed quantities
real dptdp=wpi1;
real dptdt=wpi2;
real dptdz=wpi3-wpi2*qinv;
real epara=-1.0*wpi3*b*q*deni;
// subtract net particle flow
dptdt=dptdt+vdrtmp[ip];
// ExB drift in radial direction for w-dot and flux diagnostics
real vdr=q*(ri*dptdz-g*dptdt)*deni;
real wdrive=vdr*kappa;
real wpara=epara*(upara-dtemp[ii])*qion*tem;
// Common subexpression elimination
real wdrift=q*(g*dbdt*dptdp-g*dbdp*dptdt+ri*dbdp*dptdz)*deni*dedb*qion*tem;
real wdot=(zion06[m]-paranl*zion5m)*(wdrive+wpara+wdrift);
// self-consistent and external electric field for marker orbits
const real flow0 = params.flow0;
const real flow1 = params.flow1;
const real flow2 = params.flow2;
dptdp=dptdp*nonlinear+gyroradius*(flow0+flow1*r*ainv+flow2*r*r*ainv*ainv);
dptdt=dptdt*nonlinear;
dptdz=dptdz*nonlinear;
// particle velocity
real pdot = q*(-g*dedb*dbdt - g*dptdt + ri*dptdz)*deni;
real tdot = (upara*b*(1.0-q*gp*zion4m) + q*g*(dedb*dbdp + dptdp))*deni;
real zdot = (upara*b*q*(1.0+rip*zion4m) - q*ri*(dedb*dbdp + dptdp))*deni;
real rdot = ((gp*zion4m-1.0)*(dedb*dbdt + paranl*dptdt)-paranl*q*(1.0+rip*zion4m)*dptdz)*deni;
// update particle position
#if PTX_STREAM_INTRINSICS
#if SQRT_PRECOMPUTED
zion1m = max(1.0e-8*psimax,0.5*zion01m*zion01m + dtime*pdot);
zion1m = sqrt(2.0*zion1m);
#else
zion1m = max(1.0e-8*psimax,zion01m+dtime*pdot);
#endif
TunedStore<real,CS>::St(zion1m,zion1+m,0);
zion2m = zion02m+dtime*tdot;
zion3m = zion03m+dtime*zdot;
zion4m = zion04m + dtime*rdot;
TunedStore<real,CS>::St(zion4m,zion4+m,0);
zion5m = zion05m + dtime*wdot;
TunedStore<real,CS>::St(zion5m,zion5+m,0);
real z1t = zion2m *pi2_inv+10;
zion2m = pi2*(z1t-((int)z1t));
TunedStore<real,CS>::St(zion2m,zion2+m,0);
z1t = zion3m*pi2_inv+10;
zion3m = pi2*(z1t - ((int)z1t));
TunedStore<real,CS>::St(zion3m,zion3+m,0);
if(irk==2) {
#if SQRT_PRECOMPUTED
if((zion1m > a1)||(zion1m < a0)) {
#else
if((zion1m > psimax)||(zion1m < psimin)) {
#endif
TunedStore<real,CS>::St(zion01m,zion1+m,0);
TunedStore<real,CS>::St(pi2-zion02m,zion2+m,0);
TunedStore<real,CS>::St(zion03m,zion3+m,0);
TunedStore<real,CS>::St(zion04m,zion4+m,0);
TunedStore<real,CS>::St(zion05m,zion5+m,0);
TunedStore<real,CS>::St(pi2-zion02m,zion02+m,0);
} /*else {
TunedStore<real,CS>::St(zion1m,zion01+m,0);
TunedStore<real,CS>::St(zion2m,zion02+m,0);
TunedStore<real,CS>::St(zion3m,zion03+m,0);
TunedStore<real,CS>::St(zion4m,zion04+m,0);
TunedStore<real,CS>::St(zion5m,zion05+m,0);
}*/
}
#else
#if SQRT_PRECOMPUTED
zion1m = max(1.0e-8*psimax,0.5*zion01m*zion01m + dtime*pdot);
zion1m = sqrt(2.0*zion1m);
#else
zion1m = max(1.0e-8*psimax,zion01m+dtime*pdot);
#endif
zion2m = zion02m+dtime*tdot;
zion3m = zion03m+dtime*zdot;
zion4[m] = zion04m+dtime*rdot;
zion5[m] = zion05m+dtime*wdot;
// theta and zeta normalize to [0,2*pi), modulo is slower than hand coded
// procedure on Seaborg. However, modulo works better and is preferable.
real z1t = zion2m *pi2_inv+10;
zion2[m]=pi2*(z1t-((int)z1t));
z1t = zion3m*pi2_inv+10;
zion3[m]=pi2*(z1t - ((int)z1t));
if(irk==2) {
#if SQRT_PRECOMPUTED
if(zion1[m] > a1) {
#else
if(zion1[m] > psimax) {
#endif
zion1[m]=zion01m;
zion2[m]=pi2-zion02m;
zion3[m]=zion03m;
zion4[m]=zion04m;
zion5[m]=zion05m;
#if SQRT_PRECOMPUTED
      } else if(zion1[m] < a0) {
#else
} else if (zion1[m] < psimin) {
#endif
zion1[m]=zion01m;
zion2[m]=pi2-zion02m;
zion3[m]=zion03m;
zion4[m]=zion04m;
zion5[m]=zion05m;
}
/*
zion01[m] = zion1[m];
zion02[m] = zion2[m];
zion03[m] = zion3[m];
zion04[m] = zion4[m];
zion05[m] = zion5[m];
*/
}
#endif
if (idiag==0){
ip = d_abs_min_int(mflux-1, (int)((r-a0)*d_inv));
// ii = d_abs_min_int(mpsi, (int)((r-a0)*delr+0.5));
real vdrenergy = vdr*rinv*(energy-1.5*aion*vthi*vthi*rtemi[ii])*zion05m;
// rmarker_s[ip*nthreads+tid] += zion06[m];
// eflux_s[ip*nthreads+tid] += vdrenergy;
flux_s[ip*nthreads+tid] += vdrenergy; // eflux
flux_s[mflux*nthreads+ip*nthreads+tid] += zion06[m]; //rmarker
flux_s[2*mflux*nthreads + ip*nthreads+tid] += vdr*rinv*r; // dmark
flux_s[3*mflux*nthreads + ip*nthreads+tid] += 1.0; //dden
scalar_data_s[0*nthreads+tid] += vdrenergy; // efluxi
scalar_data_s[1*nthreads+tid] += vdr*rinv*zion05m; // pfluxi
scalar_data_s[2*nthreads+tid] += b*zion04m*zion05m; // dflowi
scalar_data_s[3*nthreads+tid] += zion05m*zion05m; // entropyi
scalar_data_s[4*nthreads+tid] += energy*zion05m; // particles_energy[0]
scalar_data_s[5*nthreads+tid] += energy; // particles_energy[1]
scalar_data_s[6*nthreads+tid] += zion05m;
}
} // end m=gid
__syncthreads();
if (idiag==0){
int nTotalThreads = nthreads;
while (nTotalThreads>1){
int half = (nTotalThreads >> 1);
if (tid < half){
for (int i=0; i<7; i++){
scalar_data_s[i*nthreads+tid] += scalar_data_s[i*nthreads+tid+half];
}
for (int i=0; i<mflux; i++)
{
//eflux_s[i*nthreads+tid] += eflux_s[i*nthreads+tid+half];
//rmarker_s[i*nthreads+tid] += rmarker_s[i*nthreads+tid+half];
flux_s[i*nthreads+tid] += flux_s[i*nthreads+tid+half];
flux_s[mflux*nthreads+i*nthreads+tid] += flux_s[mflux*nthreads+i*nthreads+tid+half];
flux_s[2*mflux*nthreads+i*nthreads+tid] += flux_s[2*mflux*nthreads+i*nthreads+tid+half];
flux_s[3*mflux*nthreads+i*nthreads+tid] += flux_s[3*mflux*nthreads+i*nthreads+tid+half];
}
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (tid==0){
atomicDPupdate(scalar_data, scalar_data_s[0]);
atomicDPupdate(scalar_data+2, scalar_data_s[nthreads]);
atomicDPupdate(scalar_data+6, scalar_data_s[2*nthreads]);
atomicDPupdate(scalar_data+8, scalar_data_s[3*nthreads]);
atomicDPupdate(scalar_data+12, scalar_data_s[4*nthreads]);
atomicDPupdate(scalar_data+13, scalar_data_s[5*nthreads]);
atomicDPupdate(scalar_data+15, scalar_data_s[6*nthreads]);
}
/*
if (tid<5)
//atomicDPupdate(eflux+tid,eflux_s[tid*nthreads]);
atomicDPupdate(eflux+tid, flux_s[tid*nthreads]);
if (tid>=5&&tid<10)
//atomicDPupdate(rmarker+tid-5,rmarker_s[(tid-5)*nthreads]);
atomicDPupdate(rmarker+tid-5, flux_s[mflux*nthreads+(tid-5)*nthreads]);
if (tid>=10&&tid<15)
atomicDPupdate(dmark+tid-10, flux_s[2*mflux*nthreads+(tid-10)*nthreads]);
if (tid>=15&&tid<20)
atomicDPupdate(dden+tid-15, flux_s[3*mflux*nthreads+(tid-15)*nthreads]);
*/
if (tid<5)
//atomicDPupdate(eflux+tid,eflux_s[tid*nthreads]);
atomicDPupdate(flux_data+tid, flux_s[tid*nthreads]);
if (tid>=5&&tid<10)
//atomicDPupdate(rmarker+tid-5,rmarker_s[(tid-5)*nthreads]);
atomicDPupdate(flux_data+tid, flux_s[mflux*nthreads+(tid-5)*nthreads]);
if (tid>=10&&tid<15)
atomicDPupdate(flux_data+tid, flux_s[2*mflux*nthreads+(tid-10)*nthreads]);
if (tid>=15&&tid<20)
atomicDPupdate(flux_data+tid, flux_s[3*mflux*nthreads+(tid-15)*nthreads]);
}
}
__global__ static void gpu_push_point_interpolation(gtc_field_data_t* grid, gtc_aux_particle_point_t* point, int nloc_over_cluster, int mcellperthread)
{
const int tidx = threadIdx.x;
const int tidy = threadIdx.y;
const int nthreadsx = blockDim.x;
const int nthreadsy = blockDim.y;
const int gidx=tidx+blockIdx.x*nthreadsx;
my_int* __restrict__ point_index_count;
real* __restrict__ point_vect;
int mpsi = params.mpsi;
const real pi2_inv = params.pi2_inv;
const int mzeta = params.mzeta;
real delz, zetamin, zetatmp;
int mpsi_max;
real wzt, rdum, tflr, tdumtmp, tdum;
real wtion0tmp, wtion1tmp;
int jtion0tmp, jtion1tmp, j00, j01;
real wz1, wz0;
int ij1,ij2,ij3,ij4;
int im, ii,idx1,idx2,idx3,idx4;
real wp0, wp1, wt00, wt01, wt10, wt11;
int kk;
real e1, e2, e3;
int igrid_in;
zetamin = params.zetamin;
delz = params.delz;
igrid_in = radial_decomp.igrid_in;
point_index_count = point->point_index_count;
point_vect = point->point_vect;
#if !USE_TEXTURE
const real * __restrict__ evector = grid->evector;
#endif
mpsi_max = mpsi - 1;
int maxcount = 0;
if (gidx<nloc_over_cluster)
maxcount = (int)point_index_count[gidx];
extern __shared__ int shared_buffer_sc[];
int *offset0 = shared_buffer_sc;
int *offset1 = &shared_buffer_sc[nthreadsx];
real * evector_s0 = (real *)&offset1[nthreadsx];
real * evector_s1 = &evector_s0[3*(mzeta+1)*(nthreadsx*mcellperthread+1)];
// find the starting index for the array in shared memory
if (tidy==0){
offset0[tidx] = 1000000000;
offset1[tidx] = 1000000000;
if (gidx<nloc_over_cluster){
if (maxcount>0){
rdum = point_vect[4*gidx];
tflr = point_vect[4*gidx+1];
zetatmp = point_vect[4*gidx+2];
ii = d_abs_min_int(mpsi_max, (int) rdum);
im = ii;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j00 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion0tmp = igrid[im] + j00;
#ifdef _DEBUG_GPU
if ((jtion0tmp-igrid_in)/mcellperthread!=gidx){
printf("jtion0tmp=%d mcellperthread=%d gidx=%d\n", jtion0tmp, mcellperthread, gidx);
CudaAssert(jtion0tmp/mcellperthread==gidx);
}
#endif
im = ii + 1;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j01 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion1tmp = igrid[im] + j01;
offset0[tidx] = 3*(mzeta+1)*gidx*mcellperthread;
if (gidx==(igrid[ii+1]-2-igrid_in)/mcellperthread||gidx==(igrid[ii+1]-3-igrid_in)/mcellperthread){
offset1[tidx] = 1000000000;
}
else{
offset1[tidx] = 3*((mzeta+1)*(jtion1tmp-igrid_in) - 16);
offset1[tidx] -= 3*(mzeta+1)*(mcellperthread-1);
}
}
}
}
__syncthreads();
int nTotalThreads = nthreadsx;
while (nTotalThreads>1){
int half = (nTotalThreads >> 1);
if (tidy==0){
if (tidx < half){
int temp0 = offset0[tidx+half];
if (temp0<offset0[tidx]) offset0[tidx]=temp0;
int temp1 = offset1[tidx+half];
if (temp1<offset1[tidx]) offset1[tidx]=temp1;
}
}
__syncthreads();
nTotalThreads = (nTotalThreads >> 1);
}
if (tidy==0){
offset0[tidx] = offset0[0];
offset1[tidx] = offset1[0];
}
__syncthreads();
// copy E field from global or texture to shared memory
for (int ij=tidx; ij<nthreadsx*mcellperthread+1; ij+=nthreadsx){
if (offset1[tidx]!=1000000000){
int ij_off0 = 3*(mzeta+1)*ij+offset0[tidx];
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s0[ij*3*(mzeta+1)+ii] = EVECTOR(ij_off0+ii);
}
}
else{
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s0[ij*3*(mzeta+1)+ii] = 0.0;
}
}
}
for (int ij=tidx; ij<1.5*nthreadsx*mcellperthread; ij+=nthreadsx){
if (offset1[tidx]!=1000000000){
int ij_off1 = 3*(mzeta+1)*ij+offset1[tidx];
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s1[ij*3*(mzeta+1)+ii] = EVECTOR(ij_off1+ii);
}
}
else{
for (int ii=tidy; ii<3*(mzeta+1); ii+= nthreadsy){
evector_s1[ij*3*(mzeta+1)+ii] = 0.0;
}
}
}
__syncthreads();
// field interpolation from shared memory
for (int m=tidy*nloc_over_cluster+gidx, iter=0; iter<maxcount; iter+=blockDim.y, m+=nloc_over_cluster*blockDim.y){
if (iter+tidy<maxcount){
e1 = 0.0;
e2 = 0.0;
e3 = 0.0;
rdum = point_vect[4*m];
tflr = point_vect[4*m+1];
zetatmp = point_vect[4*m+2];
wzt = (zetatmp-zetamin)*delz;
kk = d_abs_min_int(mzeta-1, (int) wzt);
wz1 = wzt - (real) kk;
wz0 = 1.0 - wz1;
ii = d_abs_min_int(mpsi_max, (int) rdum);
wp1 = rdum - (real) ii;
wp0 = 1.0 - wp1;
im = ii;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j00 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion0tmp = igrid[im] + j00;
wtion0tmp = tdum - (real) j00;
im = ii + 1;
tdumtmp = pi2_inv * (tflr - zetatmp * qtinv[im]) + 10.0;
tdum = (tdumtmp - (int) tdumtmp) * delt[im];
j01 = d_abs_min_int(mtheta[im]-1, (int) tdum);
jtion1tmp = igrid[im] + j01;
wtion1tmp = tdum - (real) j01;
#ifdef _DEBUG_GPU
if ((jtion0tmp-igrid_in)/mcellperthread!=gidx){
printf("jtion0tmp=%d mcellperthread=%d gidx=%d\n", jtion0tmp, mcellperthread, gidx);
CudaAssert(jtion0tmp/mcellperthread==gidx);
}
#endif
wt10 = wtion0tmp;
wt00 = 1.0 - wt10;
wt11 = wtion1tmp;
wt01 = 1.0 - wt11;
ij1 = jtion0tmp - igrid_in;
ij3 = jtion1tmp - igrid_in;
ij2 = ij1 + 1;
ij4 = ij3 + 1;
#if ASSUME_MZETA_EQUALS1
idx1 = 6*ij1;
// idx2 = 6*ij2;
idx3 = 6*ij3;
// idx4 = 6*ij4;
idx1 = idx1 - offset0[tidx];
#ifdef _DEBUG_GPU
if (idx1<0||idx1>=3*(mzeta+1)*nthreadsx*mcellperthread)
printf("jtion0tmp=%d gidx=%d idx1=%d offset0=%d\n", jtion0tmp, gidx, idx1, offset0[tidx \
]);
CudaAssert(idx1>=0);
CudaAssert(idx1<3*(mzeta+1)*nthreadsx*mcellperthread);
#endif
e1 = e1+wp0*wt00*(wz0*evector_s0[idx1+0]+wz1*evector_s0[idx1+3]);
e2 = e2+wp0*wt00*(wz0*evector_s0[idx1+1]+wz1*evector_s0[idx1+4]);
e3 = e3+wp0*wt00*(wz0*evector_s0[idx1+2]+wz1*evector_s0[idx1+5]);
e1 = e1+wp0*wt10*(wz0*evector_s0[idx1+6+0]+wz1*evector_s0[idx1+6+3]);
e2 = e2+wp0*wt10*(wz0*evector_s0[idx1+6+1]+wz1*evector_s0[idx1+6+4]);
e3 = e3+wp0*wt10*(wz0*evector_s0[idx1+6+2]+wz1*evector_s0[idx1+6+5]);
idx3 = idx3 - offset1[tidx];
if (idx3<0||idx3>=3*(mzeta+1)*(1.5*nthreadsx*mcellperthread-1)){
idx3 = idx3 + offset1[tidx];
e1 = e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 = e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 = e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 = e1+wp1*wt11*(wz0*EVECTOR(idx3+6+0)+wz1*EVECTOR(idx3+6+3));
e2 = e2+wp1*wt11*(wz0*EVECTOR(idx3+6+1)+wz1*EVECTOR(idx3+6+4));
e3 = e3+wp1*wt11*(wz0*EVECTOR(idx3+6+2)+wz1*EVECTOR(idx3+6+5));
}
else {
e1 = e1+wp1*wt01*(wz0*evector_s1[idx3+0]+wz1*evector_s1[idx3+3]);
e2 = e2+wp1*wt01*(wz0*evector_s1[idx3+1]+wz1*evector_s1[idx3+4]);
e3 = e3+wp1*wt01*(wz0*evector_s1[idx3+2]+wz1*evector_s1[idx3+5]);
e1 = e1+wp1*wt11*(wz0*evector_s1[idx3+6+0]+wz1*evector_s1[idx3+6+3]);
e2 = e2+wp1*wt11*(wz0*evector_s1[idx3+6+1]+wz1*evector_s1[idx3+6+4]);
e3 = e3+wp1*wt11*(wz0*evector_s1[idx3+6+2]+wz1*evector_s1[idx3+6+5]);
}
/*
// debug
e1 =e1+wp0*wt00*(wz0*EVECTOR(idx1+0)+wz1*EVECTOR(idx1+3));
e2 =e2+wp0*wt00*(wz0*EVECTOR(idx1+1)+wz1*EVECTOR(idx1+4));
e3 =e3+wp0*wt00*(wz0*EVECTOR(idx1+2)+wz1*EVECTOR(idx1+5));
e1 =e1+wp0*wt10*(wz0*EVECTOR(idx1+6+0)+wz1*EVECTOR(idx1+6+3));
e2 =e2+wp0*wt10*(wz0*EVECTOR(idx1+6+1)+wz1*EVECTOR(idx1+6+4));
e3 =e3+wp0*wt10*(wz0*EVECTOR(idx1+6+2)+wz1*EVECTOR(idx1+6+5));
e1 =e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 =e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 =e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 =e1+wp1*wt11*(wz0*EVECTOR(idx3+6+0)+wz1*EVECTOR(idx3+6+3));
e2 =e2+wp1*wt11*(wz0*EVECTOR(idx3+6+1)+wz1*EVECTOR(idx3+6+4));
e3 =e3+wp1*wt11*(wz0*EVECTOR(idx3+6+2)+wz1*EVECTOR(idx3+6+5));
*/
#else
idx1 = 3*(mzeta+1)*ij1+3*kk;
idx2 = 3*(mzeta+1)*ij2+3*kk;
idx3 = 3*(mzeta+1)*ij3+3*kk;
idx4 = 3*(mzeta+1)*ij4+3*kk;
idx1 = idx1 - offset0[tidx];
idx2 = idx2 - offset0[tidx];
idx3 = idx3 - offset1[tidx];
idx4 = idx4 - offset1[tidx];
e1 = e1+wp0*wt00*(wz0*evector_s0[idx1+0]+wz1*evector_s0[idx1+3]);
e2 = e2+wp0*wt00*(wz0*evector_s0[idx1+1]+wz1*evector_s0[idx1+4]);
e3 = e3+wp0*wt00*(wz0*evector_s0[idx1+2]+wz1*evector_s0[idx1+5]);
e1 = e1+wp0*wt10*(wz0*evector_s0[idx2+0]+wz1*evector_s0[idx2+3]);
e2 = e2+wp0*wt10*(wz0*evector_s0[idx2+1]+wz1*evector_s0[idx2+4]);
e3 = e3+wp0*wt10*(wz0*evector_s0[idx2+2]+wz1*evector_s0[idx2+5]);
if (idx3<0||idx3>=3*(mzeta+1)*(1.5*nthreadsx*mcellperthread-1)){
e1 = e1+wp1*wt01*(wz0*evector_s1[idx3+0]+wz1*evector_s1[idx3+3]);
e2 = e2+wp1*wt01*(wz0*evector_s1[idx3+1]+wz1*evector_s1[idx3+4]);
e3 = e3+wp1*wt01*(wz0*evector_s1[idx3+2]+wz1*evector_s1[idx3+5]);
e1 = e1+wp1*wt11*(wz0*evector_s1[idx4+0]+wz1*evector_s1[idx4+3]);
e2 = e2+wp1*wt11*(wz0*evector_s1[idx4+1]+wz1*evector_s1[idx4+4]);
e3 = e3+wp1*wt11*(wz0*evector_s1[idx4+2]+wz1*evector_s1[idx4+5]);
}
else{
idx3 = idx3 + offset1[tidx];
idx4 = idx4 + offset1[tidx];
e1 = e1+wp1*wt01*(wz0*EVECTOR(idx3+0)+wz1*EVECTOR(idx3+3));
e2 = e2+wp1*wt01*(wz0*EVECTOR(idx3+1)+wz1*EVECTOR(idx3+4));
e3 = e3+wp1*wt01*(wz0*EVECTOR(idx3+2)+wz1*EVECTOR(idx3+5));
e1 = e1+wp1*wt11*(wz0*EVECTOR(idx4+0)+wz1*EVECTOR(idx4+3));
e2 = e2+wp1*wt11*(wz0*EVECTOR(idx4+1)+wz1*EVECTOR(idx4+4));
e3 = e3+wp1*wt11*(wz0*EVECTOR(idx4+2)+wz1*EVECTOR(idx4+5));
}
#endif
point_vect[4*m] = e1;
point_vect[4*m+1] = e2;
point_vect[4*m+2] = e3;
}
}
}
extern "C"
void call_gpu_push_4p_kernel(gtc_bench_data_t *gtc_input, gpu_kernel_args_t* gpu_kernel_input, int idiag){
gtc_global_params_t *h_params = &(gtc_input->global_params);
gtc_field_data_t *h_grid = &(gtc_input->field_data);
gtc_field_data_t *d_grid = &(gpu_kernel_input->d_grid);
gtc_diagnosis_data_t *h_diagnosis = &(gtc_input->diagnosis_data);
gtc_diagnosis_data_t *d_diagnosis = &(gpu_kernel_input->d_diagnosis);
gtc_radial_decomp_t *h_radial_decomp = &(gtc_input->radial_decomp);
int mzeta = h_params->mzeta; //int mgrid = h_params->mgrid; int mpsi = h_params->mpsi;
int mi = h_params->mi; int mype = gtc_input->parallel_decomp.mype;
int nloc_over = h_radial_decomp->nloc_over;
// int mgrid_cluster = (mgrid-h_grid->mtheta[mpsi]+CLUSTER_SIZE-1)/CLUSTER_SIZE;
int nloc_over_cluster = (nloc_over+CLUSTER_SIZE-1)/CLUSTER_SIZE;
/************** copy E field to GPU *****************/
gpu_timer_start(gpu_kernel_input);
//modified by cxx 2017/5 8 10:50
// CUDA_SAFE_CALL(cudaMemcpy((void *)d_grid->evector, h_grid->evector, 3*(mzeta+1)*nloc_over*sizeof(real) , cudaMemcpyHostToDevice));
memcpy(d_grid->evector, h_grid->evector, 3*(mzeta+1)*nloc_over*sizeof(real));
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(cudaMemcpy((void *)d_grid->pfluxpsi, h_grid->pfluxpsi, MFLUX*sizeof(real), cudaMemcpyHostToDevice));
memcpy(d_grid->pfluxpsi, h_grid->pfluxpsi, MFLUX*sizeof(real));
/**************** copy and reset diagnosis data****************/
if (idiag==0){
// notice: host-device memory copy of 64KB or less is NOT synchronous
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(cudaMemcpy((void*)(d_diagnosis->scalar_data), (h_diagnosis->scalar_data), 16*sizeof(real),cudaMemcpyHostToDevice));
memcpy((d_diagnosis->scalar_data), (h_diagnosis->scalar_data), 16*sizeof(real));
// CUDA_SAFE_CALL(cudaMemset(d_diagnosis->eflux, 0, 4*MFLUX*sizeof(real)));
CUDA_SAFE_CALL(cudaMemset(d_diagnosis->flux_data, 0, 4*MFLUX*sizeof(real)));
}
gpu_kernel_input->gpu_timing.memtransfer_push_time += gpu_timer_measure(gpu_kernel_input);
/************** interpolate grid-based E to point-based E************/
int mcellperthread=CLUSTER_SIZE;
int nthreadsx = 32; int nthreadsy=8;
dim3 nthread_3d(nthreadsx, nthreadsy);
int nblocks = (nloc_over_cluster + nthreadsx - 1)/nthreadsx;
int shared_buffer = 2*nthreadsx*sizeof(int) + int(3*(mzeta+1)*mcellperthread*1.5*nthreadsx)*sizeof(real)+3*(mzeta+1)*(mcellperthread*nthreadsx+1)*sizeof(real);
gpu_push_point_interpolation<<<nblocks, nthread_3d, shared_buffer>>>(gpu_kernel_input->ptr_d_grid, gpu_kernel_input->ptr_d_aux_point, nloc_over_cluster, mcellperthread);
cudaDeviceSynchronize();
gpu_kernel_input->gpu_timing.interpolation_push_point_time += gpu_timer_measure(gpu_kernel_input);
/*************** interpolate point-based E to gyroparticle ***************/
int mi_per_thread = gpu_kernel_input->charge_mi_per_thread;
int nthreads = gpu_kernel_input->nthreads/2;
int mp = gpu_kernel_input->deviceProp.multiProcessorCount;
int m = (mi + nthreads*mp - 1)/ (nthreads*mp);
m = (m + mi_per_thread - 1)/mi_per_thread;
nblocks = mp*m;
mi_per_thread = (mi + nblocks*nthreads - 1)/ mi_per_thread;
shared_buffer = 5*sizeof(real) + (7*nthreads+4*MFLUX*nthreads)*sizeof(real);
gpu_push_gyro_interpolation<<<nblocks, nthreads, shared_buffer>>>(gpu_kernel_input->ptr_d_zion, gpu_kernel_input->ptr_d_aux_point, gpu_kernel_input->ptr_d_grid, gpu_kernel_input->ptr_d_diagnosis, gpu_kernel_input->irk, gpu_kernel_input->istep, gpu_kernel_input->idiag);
gpu_kernel_input->gpu_timing.interpolation_push_gyro_time += gpu_timer_measure(gpu_kernel_input);
/********************* copy diagnosis data back to host********************/
if (idiag==0){
// notice: host-device memory copy of 64KB or less is NOT synchronous
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->scalar_data), (d_diagnosis->scalar_data), 16*sizeof(real),cudaMemcpyDeviceToHost));
memcpy((h_diagnosis->scalar_data), (d_diagnosis->scalar_data), 16*sizeof(real));
/*
CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->eflux), (d_diagnosis->eflux), MFLUX*sizeof(real),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->rmarker), (d_diagnosis->rmarker), MFLUX*sizeof(real),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->dmark), (d_diagnosis->dmark), MFLUX*sizeof(real),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->dden), (d_diagnosis->dden), MFLUX*sizeof(real),cudaMemcpyDeviceToHost));
*/
//modified by cxx 2017/5/8
// CUDA_SAFE_CALL(cudaMemcpy((void*)(h_diagnosis->flux_data), (d_diagnosis->flux_data), 4*MFLUX*sizeof(real),cudaMemcpyDeviceToHost));
memcpy((h_diagnosis->flux_data), (d_diagnosis->flux_data), 4*MFLUX*sizeof(real));
gpu_kernel_input->gpu_timing.memtransfer_push_time += gpu_timer_measure(gpu_kernel_input);
}
gpu_kernel_input->gpu_timing.device_push_time += gpu_timer_measure_end(gpu_kernel_input);
}
|
e662ac34152fdc07fb7dee9897d3e6edf229cd46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef PROFILING
#include "RDTimer.h"
#endif
//====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "define.c"
#include "extract_kernel.hip"
#include "prepare_kernel.cu"
#include "reduce_kernel.hip"
#include "srad_kernel.hip"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int main(int argc, char *argv []){
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
#ifdef PROFILING
PerfSerializer *serializeTime = new SimplePerfSerializer(argv[6]);
RDTimer *myTimer = new RDTimerCPU();
#endif
time0 = get_time();
#ifdef PROFILING
myTimer->Reset("SETUP VARIABLES");
myTimer->Start();
#endif
// input image, input parameters
fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// input image, input parameters
fp* image; // input image
int Nr,Nc; // IMAGE nbr of rows/cols/elements
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1,r2,c1,c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN,*iS,*jE,*jW;
// counters
int iter; // primary loop
long i,j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("READ COMMAND LINE PARAMETERS");
myTimer->Start();
#endif
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if(argc != 7){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}
time2 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("READ IMAGE FROM FILE");
myTimer->Start();
#endif
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics( argv[5],
image_ori,
image_ori_rows,
image_ori_cols,
1);
time3 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("RESIZE IMAGE");
myTimer->Start();
#endif
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr*Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize( image_ori,
image_ori_rows,
image_ori_cols,
image,
Nr,
Nc,
1);
time4 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("GPU DRIVER INIT + CPU/GPU SETUP + MEMORY ALLOCATION");
myTimer->Start();
#endif
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int *)malloc(mem_size_i) ; // north surrounding element
iS = (int *)malloc(mem_size_i) ; // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int *)malloc(mem_size_j) ; // west surrounding element
jE = (int *)malloc(mem_size_j) ; // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i=0; i<Nr; i++) {
iN[i] = i-1; // holds index of IMAGE row above
iS[i] = i+1; // holds index of IMAGE row below
}
for (j=0; j<Nc; j++) {
jW[j] = j-1; // holds index of IMAGE column on the left
jE[j] = j+1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1
//================================================================================80
// GPU SETUP
//================================================================================80
// allocate memory for entire IMAGE on DEVICE
mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE
hipMalloc((void **)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
hipMalloc((void **)&d_iN, mem_size_i); //
hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_iS, mem_size_i); //
hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jE, mem_size_j); //
hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jW, mem_size_j); //
hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); //
// allocate memory for partial sums on DEVICE
hipMalloc((void **)&d_sums, mem_size); //
hipMalloc((void **)&d_sums2, mem_size); //
// allocate memory for derivatives
hipMalloc((void **)&d_dN, mem_size); //
hipMalloc((void **)&d_dS, mem_size); //
hipMalloc((void **)&d_dW, mem_size); //
hipMalloc((void **)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
hipMalloc((void **)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne/threads.x;
if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
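	// Example: with the default 502x458 input resized to Nr x Nc, Ne = Nr*Nc
	// (e.g. 229,916 when Nr=502 and Nc=458), so the 1-D grid uses
	// ceil(Ne/NUMBER_THREADS) blocks of NUMBER_THREADS threads each.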
time5 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COPY DATA TO CPU->GPU");
myTimer->Start();
#endif
//================================================================================80
// 	COPY INPUT TO GPU
//================================================================================80
hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice);
time6 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: extract image");
myTimer->Start();
#endif
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
hipLaunchKernelGGL(extract, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("extract");
time7 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: prepare + reduce + memcpy + srad + srad2");
myTimer->Start();
#endif
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
hipLaunchKernelGGL(prepare, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I,
d_sums,
d_sums2);
checkCUDAError("prepare");
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while(blocks2.x != 0){
checkCUDAError("before reduce");
// run kernel
hipLaunchKernelGGL(reduce, dim3(blocks2), dim3(threads), 0, 0, Ne,
no,
mul,
d_sums,
d_sums2);
checkCUDAError("reduce");
// update execution parameters
no = blocks2.x; // get current number of elements
if(blocks2.x == 1){
blocks2.x = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x/threads.x; // number of blocks
if (blocks2.x % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
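	// The while loop above launches the reduce kernel repeatedly: each pass
	// folds `no` partial sums (strided by `mul`) into blocks2.x per-block
	// results, until a single pair of totals remains in d_sums[0]/d_sums2[0].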
checkCUDAError("before copy sum");
	// copy total sums back to host
mem_size_single = sizeof(fp) * 1;
hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost);
hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI
	q0sqr = varROI / meanROI2; // gets normalized variance (q0^2) of ROI, not the standard deviation
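	// q0sqr is the speckle scale estimate q0(t)^2 that drives the SRAD
	// diffusion coefficient; it is recomputed from the uniform ROI on every
	// iteration.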
// execute srad kernel
hipLaunchKernelGGL(srad, dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
	q0sqr, // squared coefficient of variation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
hipLaunchKernelGGL(srad2, dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: compress image");
myTimer->Start();
#endif
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
hipLaunchKernelGGL(compress, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("compress");
time9 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COPY DATA TO GPU->CPU");
myTimer->Start();
#endif
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost);
checkCUDAError("copy back");
time10 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("SAVE IMAGE INTO FILE");
myTimer->Start();
#endif
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics( "image_out.pgm",
image,
Nr,
Nc,
1,
255);
time11 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("FREE MEMORY");
myTimer->Start();
#endif
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
hipFree(d_I);
hipFree(d_c);
hipFree(d_iN);
hipFree(d_iS);
hipFree(d_jE);
hipFree(d_jW);
hipFree(d_dN);
hipFree(d_dS);
hipFree(d_dE);
hipFree(d_dW);
hipFree(d_sums);
hipFree(d_sums2);
time12 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
delete myTimer;
delete serializeTime;
#endif
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time12-time0) / 1000000);
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
| e662ac34152fdc07fb7dee9897d3e6edf229cd46.cu | #include "hip/hip_runtime.h"
#ifdef PROFILING
#include "RDTimer.h"
#endif
//====================================================================================================100
// UPDATE
//====================================================================================================100
// 2006.03 Rob Janiczek
// --creation of prototype version
// 2006.03 Drew Gilliam
// --rewriting of prototype version into current version
// --got rid of multiple function calls, all code in a
// single function (for speed)
// --code cleanup & commenting
// --code optimization efforts
// 2006.04 Drew Gilliam
// --added diffusion coefficent saturation on [0,1]
// 2009.12 Lukasz G. Szafaryn
// -- reading from image, command line inputs
// 2010.01 Lukasz G. Szafaryn
// --comments
//====================================================================================================100
// DEFINE / INCLUDE
//====================================================================================================100
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <cuda.h>
#include "define.c"
#include "extract_kernel.cu"
#include "prepare_kernel.cu"
#include "reduce_kernel.cu"
#include "srad_kernel.cu"
#include "srad2_kernel.cu"
#include "compress_kernel.cu"
#include "graphics.c"
#include "resize.c"
#include "timer.c"
#include "device.c" // (in library path specified to compiler) needed by for device functions
//====================================================================================================100
// MAIN FUNCTION
//====================================================================================================100
int main(int argc, char *argv []){
//================================================================================80
// VARIABLES
//================================================================================80
// time
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
long long time7;
long long time8;
long long time9;
long long time10;
long long time11;
long long time12;
#ifdef PROFILING
PerfSerializer *serializeTime = new SimplePerfSerializer(argv[6]);
RDTimer *myTimer = new RDTimerCPU();
#endif
time0 = get_time();
#ifdef PROFILING
myTimer->Reset("SETUP VARIABLES");
myTimer->Start();
#endif
// inputs image, input paramenters
	fp* image_ori; // original input image
int image_ori_rows;
int image_ori_cols;
long image_ori_elem;
// inputs image, input paramenters
fp* image; // input image
	int Nr,Nc; // IMAGE nbr of rows/cols
long Ne;
// algorithm parameters
int niter; // nbr of iterations
fp lambda; // update step size
// size of IMAGE
int r1,r2,c1,c2; // row/col coordinates of uniform ROI
long NeROI; // ROI nbr of elements
// surrounding pixel indicies
int *iN,*iS,*jE,*jW;
// counters
int iter; // primary loop
long i,j; // image row/col
// memory sizes
int mem_size_i;
int mem_size_j;
int mem_size_single;
//================================================================================80
// GPU VARIABLES
//================================================================================80
// CUDA kernel execution parameters
dim3 threads;
int blocks_x;
dim3 blocks;
dim3 blocks2;
dim3 blocks3;
// memory sizes
int mem_size; // matrix memory size
// HOST
int no;
int mul;
fp total;
fp total2;
fp meanROI;
fp meanROI2;
fp varROI;
fp q0sqr;
// DEVICE
fp* d_sums; // partial sum
fp* d_sums2;
int* d_iN;
int* d_iS;
int* d_jE;
int* d_jW;
fp* d_dN;
fp* d_dS;
fp* d_dW;
fp* d_dE;
fp* d_I; // input IMAGE on DEVICE
fp* d_c;
time1 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("READ COMMAND LINE PARAMETERS");
myTimer->Start();
#endif
//================================================================================80
// GET INPUT PARAMETERS
//================================================================================80
if(argc != 7){
printf("ERROR: wrong number of arguments\n");
return 0;
}
else{
niter = atoi(argv[1]);
lambda = atof(argv[2]);
Nr = atoi(argv[3]); // it is 502 in the original image
Nc = atoi(argv[4]); // it is 458 in the original image
}
time2 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("READ IMAGE FROM FILE");
myTimer->Start();
#endif
//================================================================================80
// READ IMAGE (SIZE OF IMAGE HAS TO BE KNOWN)
//================================================================================80
// read image
image_ori_rows = 502;
image_ori_cols = 458;
image_ori_elem = image_ori_rows * image_ori_cols;
image_ori = (fp*)malloc(sizeof(fp) * image_ori_elem);
read_graphics( argv[5],
image_ori,
image_ori_rows,
image_ori_cols,
1);
time3 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("RESIZE IMAGE");
myTimer->Start();
#endif
//================================================================================80
// RESIZE IMAGE (ASSUMING COLUMN MAJOR STORAGE OF image_orig)
//================================================================================80
Ne = Nr*Nc;
image = (fp*)malloc(sizeof(fp) * Ne);
resize( image_ori,
image_ori_rows,
image_ori_cols,
image,
Nr,
Nc,
1);
time4 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("GPU DRIVER INIT + CPU/GPU SETUP + MEMORY ALLOCATION");
myTimer->Start();
#endif
//================================================================================80
// SETUP
//================================================================================80
r1 = 0; // top row index of ROI
r2 = Nr - 1; // bottom row index of ROI
c1 = 0; // left column index of ROI
c2 = Nc - 1; // right column index of ROI
// ROI image size
NeROI = (r2-r1+1)*(c2-c1+1); // number of elements in ROI, ROI size
// allocate variables for surrounding pixels
mem_size_i = sizeof(int) * Nr; //
iN = (int *)malloc(mem_size_i) ; // north surrounding element
iS = (int *)malloc(mem_size_i) ; // south surrounding element
mem_size_j = sizeof(int) * Nc; //
jW = (int *)malloc(mem_size_j) ; // west surrounding element
jE = (int *)malloc(mem_size_j) ; // east surrounding element
// N/S/W/E indices of surrounding pixels (every element of IMAGE)
for (i=0; i<Nr; i++) {
iN[i] = i-1; // holds index of IMAGE row above
iS[i] = i+1; // holds index of IMAGE row below
}
for (j=0; j<Nc; j++) {
jW[j] = j-1; // holds index of IMAGE column on the left
jE[j] = j+1; // holds index of IMAGE column on the right
}
// N/S/W/E boundary conditions, fix surrounding indices outside boundary of image
iN[0] = 0; // changes IMAGE top row index from -1 to 0
iS[Nr-1] = Nr-1; // changes IMAGE bottom row index from Nr to Nr-1
jW[0] = 0; // changes IMAGE leftmost column index from -1 to 0
jE[Nc-1] = Nc-1; // changes IMAGE rightmost column index from Nc to Nc-1
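	// Note (not from the original source): clamping the out-of-range indices above makes
	// every border pixel its own neighbour, so the finite difference across the image
	// edge evaluates to zero (replicate / zero-flux boundary handling). For example,
	// with Nr = 4 the North index array becomes iN = {0, 0, 1, 2}.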
//================================================================================80
// GPU SETUP
//================================================================================80
// allocate memory for entire IMAGE on DEVICE
mem_size = sizeof(fp) * Ne; // get the size of float representation of input IMAGE
hipMalloc((void **)&d_I, mem_size); //
// allocate memory for coordinates on DEVICE
hipMalloc((void **)&d_iN, mem_size_i); //
hipMemcpy(d_iN, iN, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_iS, mem_size_i); //
hipMemcpy(d_iS, iS, mem_size_i, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jE, mem_size_j); //
hipMemcpy(d_jE, jE, mem_size_j, hipMemcpyHostToDevice); //
hipMalloc((void **)&d_jW, mem_size_j); //
hipMemcpy(d_jW, jW, mem_size_j, hipMemcpyHostToDevice); //
// allocate memory for partial sums on DEVICE
hipMalloc((void **)&d_sums, mem_size); //
hipMalloc((void **)&d_sums2, mem_size); //
// allocate memory for derivatives
hipMalloc((void **)&d_dN, mem_size); //
hipMalloc((void **)&d_dS, mem_size); //
hipMalloc((void **)&d_dW, mem_size); //
hipMalloc((void **)&d_dE, mem_size); //
// allocate memory for coefficient on DEVICE
hipMalloc((void **)&d_c, mem_size); //
checkCUDAError("setup");
//================================================================================80
// KERNEL EXECUTION PARAMETERS
//================================================================================80
// all kernels operating on entire matrix
threads.x = NUMBER_THREADS; // define the number of threads in the block
threads.y = 1;
blocks_x = Ne/threads.x;
if (Ne % threads.x != 0){ // compensate for division remainder above by adding one grid
blocks_x = blocks_x + 1;
}
blocks.x = blocks_x; // define the number of blocks in the grid
blocks.y = 1;
time5 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COPY DATA TO CPU->GPU");
myTimer->Start();
#endif
//================================================================================80
// COPY INPUT TO CPU
//================================================================================80
hipMemcpy(d_I, image, mem_size, hipMemcpyHostToDevice);
time6 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: extract image");
myTimer->Start();
#endif
//================================================================================80
// SCALE IMAGE DOWN FROM 0-255 TO 0-1 AND EXTRACT
//================================================================================80
hipLaunchKernelGGL(extract, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("extract");
time7 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: prepare + reduce + memcpy + srad + srad2");
myTimer->Start();
#endif
//================================================================================80
// COMPUTATION
//================================================================================80
// printf("iterations: ");
// execute main loop
for (iter=0; iter<niter; iter++){ // do for the number of iterations input parameter
// printf("%d ", iter);
// fflush(NULL);
// execute square kernel
hipLaunchKernelGGL(prepare, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I,
d_sums,
d_sums2);
checkCUDAError("prepare");
// performs subsequent reductions of sums
blocks2.x = blocks.x; // original number of blocks
blocks2.y = blocks.y;
no = Ne; // original number of sum elements
mul = 1; // original multiplier
while(blocks2.x != 0){
checkCUDAError("before reduce");
// run kernel
hipLaunchKernelGGL(reduce, dim3(blocks2), dim3(threads), 0, 0, Ne,
no,
mul,
d_sums,
d_sums2);
checkCUDAError("reduce");
// update execution parameters
no = blocks2.x; // get current number of elements
if(blocks2.x == 1){
blocks2.x = 0;
}
else{
mul = mul * NUMBER_THREADS; // update the increment
blocks_x = blocks2.x/threads.x; // number of blocks
	if (blocks2.x % threads.x != 0){ // compensate for the division remainder above by adding one block
blocks_x = blocks_x + 1;
}
blocks2.x = blocks_x;
blocks2.y = 1;
}
checkCUDAError("after reduce");
}
checkCUDAError("before copy sum");
	// copy total sums from device (GPU) back to host (CPU)
mem_size_single = sizeof(fp) * 1;
hipMemcpy(&total, d_sums, mem_size_single, hipMemcpyDeviceToHost);
hipMemcpy(&total2, d_sums2, mem_size_single, hipMemcpyDeviceToHost);
checkCUDAError("copy sum");
// calculate statistics
meanROI = total / fp(NeROI); // gets mean (average) value of element in ROI
meanROI2 = meanROI * meanROI; //
varROI = (total2 / fp(NeROI)) - meanROI2; // gets variance of ROI
	q0sqr = varROI / meanROI2; // gets squared coefficient of variation (q0^2) of ROI
// execute srad kernel
hipLaunchKernelGGL(srad, dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
	q0sqr, // squared coefficient of variation of ROI
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad");
// execute srad2 kernel
hipLaunchKernelGGL(srad2, dim3(blocks), dim3(threads), 0, 0, lambda, // SRAD coefficient
Nr, // # of rows in input image
Nc, // # of columns in input image
Ne, // # of elements in input image
d_iN, // indices of North surrounding pixels
d_iS, // indices of South surrounding pixels
d_jE, // indices of East surrounding pixels
d_jW, // indices of West surrounding pixels
d_dN, // North derivative
d_dS, // South derivative
d_dW, // West derivative
d_dE, // East derivative
d_c, // diffusion coefficient
d_I); // output image
checkCUDAError("srad2");
}
// printf("\n");
time8 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COMPUTE: compress image");
myTimer->Start();
#endif
//================================================================================80
// SCALE IMAGE UP FROM 0-1 TO 0-255 AND COMPRESS
//================================================================================80
hipLaunchKernelGGL(compress, dim3(blocks), dim3(threads), 0, 0, Ne,
d_I);
checkCUDAError("compress");
time9 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("COPY DATA TO GPU->CPU");
myTimer->Start();
#endif
//================================================================================80
// COPY RESULTS BACK TO CPU
//================================================================================80
hipMemcpy(image, d_I, mem_size, hipMemcpyDeviceToHost);
checkCUDAError("copy back");
time10 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("SAVE IMAGE INTO FILE");
myTimer->Start();
#endif
//================================================================================80
// WRITE IMAGE AFTER PROCESSING
//================================================================================80
write_graphics( "image_out.pgm",
image,
Nr,
Nc,
1,
255);
time11 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
myTimer->Reset("FREE MEMORY");
myTimer->Start();
#endif
//================================================================================80
// DEALLOCATE
//================================================================================80
free(image_ori);
free(image);
free(iN);
free(iS);
free(jW);
free(jE);
hipFree(d_I);
hipFree(d_c);
hipFree(d_iN);
hipFree(d_iS);
hipFree(d_jE);
hipFree(d_jW);
hipFree(d_dN);
hipFree(d_dS);
hipFree(d_dE);
hipFree(d_dW);
hipFree(d_sums);
hipFree(d_sums2);
time12 = get_time();
#ifdef PROFILING
myTimer->Stop();
serializeTime->Serialize(myTimer);
delete myTimer;
delete serializeTime;
#endif
//================================================================================80
// DISPLAY TIMING
//================================================================================80
printf("Time spent in different stages of the application:\n");
printf("%15.12f s, %15.12f % : SETUP VARIABLES\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ COMMAND LINE PARAMETERS\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : READ IMAGE FROM FILE\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : RESIZE IMAGE\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : GPU DRIVER INIT, CPU/GPU SETUP, MEMORY ALLOCATION\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO CPU->GPU\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : EXTRACT IMAGE\n", (float) (time7-time6) / 1000000, (float) (time7-time6) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPUTE\n", (float) (time8-time7) / 1000000, (float) (time8-time7) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COMPRESS IMAGE\n", (float) (time9-time8) / 1000000, (float) (time9-time8) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : COPY DATA TO GPU->CPU\n", (float) (time10-time9) / 1000000, (float) (time10-time9) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : SAVE IMAGE INTO FILE\n", (float) (time11-time10) / 1000000, (float) (time11-time10) / (float) (time12-time0) * 100);
printf("%15.12f s, %15.12f % : FREE MEMORY\n", (float) (time12-time11) / 1000000, (float) (time12-time11) / (float) (time12-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time12-time0) / 1000000);
}
//====================================================================================================100
// END OF FILE
//====================================================================================================100
|
4231ecf38d20f7eb56b9de55f45e3a0ff6cad550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
namespace pwn {
template <class T>
__global__ void fillBuffer(T* buffer, int numElems, T value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<numElems){
buffer[i] = value;
}
}
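// Illustrative helper (a sketch, not part of the original source): a typical way to
// drive fillBuffer from the host. The 256-thread block size and the helper name are
// assumptions for illustration only.
template <class T>
inline void fillBufferSketch(T* d_buffer, int numElems, T value) {
  const int threads = 256;                                 // assumed block size
  const int blocks  = (numElems + threads - 1) / threads;  // ceiling division over elements
  fillBuffer<T><<<blocks, threads>>>(d_buffer, numElems, value);
}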
__global__ void computeDepthBuffer(int* buffer, int rows, int cols,
const float* KT, const float* points, int numPoints,
int dmin, int dmax){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float ip[4];
if (i<numPoints){
const float* wp=points+4*i;
pwn::matVecMul<4,4>(ip,KT,wp);
float iw= 1./ip[2];
int d = ip[2] * 1000.0f;
int x = (int)(ip[0]*iw);
int y = (int)(ip[1]*iw);
int offset = y*rows+x;
if (d > dmin && d < dmax &&
x >= 0 && x<rows &&
y>=0 && y<cols){
atomicMin(buffer+offset,d);
}
}
}
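// Note (not from the original source): the kernel above is a scatter-style z-buffer
// pass. Each world point is projected by the 4x4 matrix KT, its depth is quantised to
// millimetres (d = z * 1000), and atomicMin keeps only the nearest depth per pixel, so
// concurrent writes to the same pixel resolve deterministically.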
__global__ void computeIndexBuffer(int* ibuffer, const int* zbuffer, int rows, int cols,
const float* KT, const float* points, int numPoints){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float ip[4];
if (i<numPoints){
const float* wp=points+4*i;
pwn::matVecMul<4,4>(ip,KT,wp);
float iw= 1./ip[2];
int d = ip[2] * 1000.0f;
int x = (int)(ip[0]*iw);
int y = (int)(ip[1]*iw);
int offset = y*rows+x;
if ( x >= 0 && x<rows &&
y>=0 && y<cols && d == zbuffer[offset]){
ibuffer[offset]=i;
}
}
}
__device__ int processCorrespondence(float* Htt,
float* Hrr,
float* Htr,
float* bt,
float* br,
const float* transform,
const float* referencePoints,
const float* referenceNormals,
const float* referenceCurvatures,
const float* currentPoints,
const float* currentNormals,
const float* currentCurvatures,
const float* currentOmegaPs,
const float* currentOmegaNs,
const int referenceIndex,
const int currentIndex,
const float flatCurvatureThreshold,
const float normalThreshold,
const float minCurvatureRatio,
const float maxCurvatureRatio,
const float distanceThreshold,
const float inlierThreshold){
vecFill<16>(Htt, 0);
vecFill<16>(Htr, 0);
vecFill<16>(Hrr, 0);
vecFill<4>(bt,0);
vecFill<4>(br,0);
const float* T=transform;
float referencePoint[4];
{
const float* referencePoint_ = referencePoints + (referenceIndex*4);
matVecMul<4,4>(referencePoint,T,referencePoint_);
}
float referenceNormal[4];
{
const float* referenceNormal_ = referenceNormals + (referenceIndex*4);
matVecMul<4,4>(referenceNormal,T,referenceNormal_);
}
float referenceCurvature = referenceCurvatures[referenceIndex];
referenceCurvature = (referenceCurvature<flatCurvatureThreshold)?flatCurvatureThreshold:referenceCurvature;
const float* currentPoint = currentPoints + ( currentIndex* 4);
const float* currentNormal = currentNormals + ( currentIndex* 4);
float currentCurvature = currentCurvatures[currentIndex];
currentCurvature = (currentCurvature<flatCurvatureThreshold)?flatCurvatureThreshold:currentCurvature;
const float* omegaP = currentOmegaPs + ( currentIndex* 16);
const float* omegaN = currentOmegaNs + ( currentIndex* 16);
float pointsDifference[4];
float curvatureRatio=(referenceCurvature + 1e-5)/(currentCurvature + 1e-5);
float normalsRatio = vecDot<4>(currentNormal,referenceNormal);
vecCopy<4>(pointsDifference,referencePoint);
vecSum<4>(pointsDifference,currentPoint,-1.0f);
float pointsDistance=vecDot<4>(pointsDifference,pointsDifference);
bool normalFail = (normalsRatio < normalThreshold);
bool distanceFail = (pointsDistance > distanceThreshold);
bool curvatureFail = ((curvatureRatio < minCurvatureRatio) ||
(curvatureRatio > maxCurvatureRatio));
int increment = ! normalFail && ! distanceFail && ! curvatureFail;
if (! increment)
return 0;
float normalsDifference[4];
vecCopy<4>(normalsDifference,referenceNormal);
vecSum<4>(normalsDifference,currentNormal,-1.0f);
//const Vector4f ep = omegaP*pointError;
float ep[4];
matVecMul<4,4>(ep, omegaP, pointsDifference);
//const Vector4f en = omegaN*normalError;
float en[4];
matVecMul<4,4>(en, omegaN, normalsDifference);
float localError = vecDot<4>(ep,pointsDifference) + vecDot<4>(en,normalsDifference);
int chiOk = localError < inlierThreshold;
if (! chiOk)
return 0;
//Matrix4f Sp = skew(referencePoint);
float Sp[16];
matBuildSkew(Sp,referencePoint);
//Matrix4f Sn = skew(referenceNormal);
float Sn[16];
matBuildSkew(Sn,referenceNormal);
//Htt = omegaP;
vecCopy<16>(Htt,omegaP);
//Htr.noalias() = omegaP*Sp;
matMatMul<4,4,4>(Htr,omegaP,Sp);
//Hrr.noalias() = - (Sp*omegaP*Sp + Sn*omegaN*Sn);
float temp[16], temp2[16];
matMatMul<4,4,4>(Hrr,Sp,Htr);
matMatMul<4,4,4>(temp,Sn,omegaN);
matMatMul<4,4,4>(temp2,temp,Sn);
vecSum<16>(Hrr,temp2,+1.0f);
vecScale<16>(Hrr,-1.0f);
//bt.noalias() = ep;
vecCopy<4>(bt,ep);
//br.noalias() = - (Sp*ep + Sn*en);
matVecMul<4,4>(br,Sp,ep);
matVecMul<4,4>(temp,Sn,en);
vecSum<4>(br,temp,+1.0f);
vecScale<4>(br,-1.0f);
*(bt+3)=1;
*(br+3)= localError;
return 1;
}
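// Note (math only, hedged): the blocks assembled above are the pieces of a
// point-plus-normal Gauss-Newton system. With a stacked Jacobian of the form
// J = [ I  -skew(p) ; 0  -skew(n) ] and Omega = diag(omegaP, omegaN), the Hessian
// H = J^T Omega J splits into the translation, coupling and rotation blocks
// Htt, Htr, Hrr, and b = J^T Omega e splits into bt, br (up to the sign conventions
// used here), which is what the Sp/Sn products compute term by term.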
}
| 4231ecf38d20f7eb56b9de55f45e3a0ff6cad550.cu | namespace pwn {
template <class T>
__global__ void fillBuffer(T* buffer, int numElems, T value){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i<numElems){
buffer[i] = value;
}
}
__global__ void computeDepthBuffer(int* buffer, int rows, int cols,
const float* KT, const float* points, int numPoints,
int dmin, int dmax){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float ip[4];
if (i<numPoints){
const float* wp=points+4*i;
pwn::matVecMul<4,4>(ip,KT,wp);
float iw= 1./ip[2];
int d = ip[2] * 1000.0f;
int x = (int)(ip[0]*iw);
int y = (int)(ip[1]*iw);
int offset = y*rows+x;
if (d > dmin && d < dmax &&
x >= 0 && x<rows &&
y>=0 && y<cols){
atomicMin(buffer+offset,d);
}
}
}
__global__ void computeIndexBuffer(int* ibuffer, const int* zbuffer, int rows, int cols,
const float* KT, const float* points, int numPoints){
int i = blockDim.x * blockIdx.x + threadIdx.x;
float ip[4];
if (i<numPoints){
const float* wp=points+4*i;
pwn::matVecMul<4,4>(ip,KT,wp);
float iw= 1./ip[2];
int d = ip[2] * 1000.0f;
int x = (int)(ip[0]*iw);
int y = (int)(ip[1]*iw);
int offset = y*rows+x;
if ( x >= 0 && x<rows &&
y>=0 && y<cols && d == zbuffer[offset]){
ibuffer[offset]=i;
}
}
}
__device__ int processCorrespondence(float* Htt,
float* Hrr,
float* Htr,
float* bt,
float* br,
const float* transform,
const float* referencePoints,
const float* referenceNormals,
const float* referenceCurvatures,
const float* currentPoints,
const float* currentNormals,
const float* currentCurvatures,
const float* currentOmegaPs,
const float* currentOmegaNs,
const int referenceIndex,
const int currentIndex,
const float flatCurvatureThreshold,
const float normalThreshold,
const float minCurvatureRatio,
const float maxCurvatureRatio,
const float distanceThreshold,
const float inlierThreshold){
vecFill<16>(Htt, 0);
vecFill<16>(Htr, 0);
vecFill<16>(Hrr, 0);
vecFill<4>(bt,0);
vecFill<4>(br,0);
const float* T=transform;
float referencePoint[4];
{
const float* referencePoint_ = referencePoints + (referenceIndex*4);
matVecMul<4,4>(referencePoint,T,referencePoint_);
}
float referenceNormal[4];
{
const float* referenceNormal_ = referenceNormals + (referenceIndex*4);
matVecMul<4,4>(referenceNormal,T,referenceNormal_);
}
float referenceCurvature = referenceCurvatures[referenceIndex];
referenceCurvature = (referenceCurvature<flatCurvatureThreshold)?flatCurvatureThreshold:referenceCurvature;
const float* currentPoint = currentPoints + ( currentIndex* 4);
const float* currentNormal = currentNormals + ( currentIndex* 4);
float currentCurvature = currentCurvatures[currentIndex];
currentCurvature = (currentCurvature<flatCurvatureThreshold)?flatCurvatureThreshold:currentCurvature;
const float* omegaP = currentOmegaPs + ( currentIndex* 16);
const float* omegaN = currentOmegaNs + ( currentIndex* 16);
float pointsDifference[4];
float curvatureRatio=(referenceCurvature + 1e-5)/(currentCurvature + 1e-5);
float normalsRatio = vecDot<4>(currentNormal,referenceNormal);
vecCopy<4>(pointsDifference,referencePoint);
vecSum<4>(pointsDifference,currentPoint,-1.0f);
float pointsDistance=vecDot<4>(pointsDifference,pointsDifference);
bool normalFail = (normalsRatio < normalThreshold);
bool distanceFail = (pointsDistance > distanceThreshold);
bool curvatureFail = ((curvatureRatio < minCurvatureRatio) ||
(curvatureRatio > maxCurvatureRatio));
int increment = ! normalFail && ! distanceFail && ! curvatureFail;
if (! increment)
return 0;
float normalsDifference[4];
vecCopy<4>(normalsDifference,referenceNormal);
vecSum<4>(normalsDifference,currentNormal,-1.0f);
//const Vector4f ep = omegaP*pointError;
float ep[4];
matVecMul<4,4>(ep, omegaP, pointsDifference);
//const Vector4f en = omegaN*normalError;
float en[4];
matVecMul<4,4>(en, omegaN, normalsDifference);
float localError = vecDot<4>(ep,pointsDifference) + vecDot<4>(en,normalsDifference);
int chiOk = localError < inlierThreshold;
if (! chiOk)
return 0;
//Matrix4f Sp = skew(referencePoint);
float Sp[16];
matBuildSkew(Sp,referencePoint);
//Matrix4f Sn = skew(referenceNormal);
float Sn[16];
matBuildSkew(Sn,referenceNormal);
//Htt = omegaP;
vecCopy<16>(Htt,omegaP);
//Htr.noalias() = omegaP*Sp;
matMatMul<4,4,4>(Htr,omegaP,Sp);
//Hrr.noalias() = - (Sp*omegaP*Sp + Sn*omegaN*Sn);
float temp[16], temp2[16];
matMatMul<4,4,4>(Hrr,Sp,Htr);
matMatMul<4,4,4>(temp,Sn,omegaN);
matMatMul<4,4,4>(temp2,temp,Sn);
vecSum<16>(Hrr,temp2,+1.0f);
vecScale<16>(Hrr,-1.0f);
//bt.noalias() = ep;
vecCopy<4>(bt,ep);
//br.noalias() = - (Sp*ep + Sn*en);
matVecMul<4,4>(br,Sp,ep);
matVecMul<4,4>(temp,Sn,en);
vecSum<4>(br,temp,+1.0f);
vecScale<4>(br,-1.0f);
*(bt+3)=1;
*(br+3)= localError;
return 1;
}
}
|
8487d6179f8086586a4068492de818b7e0fbc45d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <blas_magma.h>
#include <string.h>
#include <vector>
#include <algorithm>
#include <util_quda.h>
#include <quda_internal.h>
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
#define MAGMA_17 //default version version of the MAGMA library
#ifdef MAGMA_LIB
#include <magma.h>
#ifdef MAGMA_14
#define _cV 'V'
#define _cU 'U'
#define _cR 'R'
#define _cL 'L'
#define _cC 'C'
#define _cN 'N'
#define _cNV 'N'
#else
#define _cV MagmaVec
#define _cU MagmaUpper
#define _cR MagmaRight
#define _cL MagmaLeft
#define _cC MagmaConjTrans
#define _cN MagmaNoTrans
#define _cNV MagmaNoVec
#endif
#endif
//Column major format: Big matrix times Little matrix.
#ifdef MAGMA_LIB
//Simplified version for the above:
#define BLOCK_SIZE 16
__global__ void SMatCMatCuda_16x16(cuFloatComplex *outBuff, const int bldm, cuFloatComplex *sMat, const int sldm, hipDoubleComplex *cMat, const int cldm, const int scols)
{
//block coords:
int by = blockIdx.x;
int bx = blockIdx.y;
//local coords:
int ty = threadIdx.x;
int tx = threadIdx.y;
int sBegin = BLOCK_SIZE * by;//global offset in Y-direction for the Big matrix
int sEnd = sBegin + sldm*scols - 1;//loop limit in X-direction for the Big matrix
int sStep = sldm * BLOCK_SIZE;//step in X-direction for the Big matrix
int cBegin = cldm * BLOCK_SIZE * bx;//global offset in X-direction for the Little matrix
int cStep = BLOCK_SIZE;//step in Y-direction for the Little matrix
hipDoubleComplex accum = make_cuDoubleComplex (0.0, 0.0);
cuFloatComplex ftmp;
hipDoubleComplex dtmp;
for (int s = sBegin, c = cBegin; s <= sEnd; s += sStep, c += cStep)
{
__shared__ float reSmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float imSmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double reCmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double imCmat[BLOCK_SIZE][BLOCK_SIZE];
ftmp = sMat[s + sldm * tx + ty];
reSmat[ty][tx] = cuCrealf(ftmp);
imSmat[ty][tx] = cuCimagf(ftmp);
dtmp = cMat[c + cldm * tx + ty];
reCmat[ty][tx] = cuCreal(dtmp);
imCmat[ty][tx] = cuCimag(dtmp);
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
ftmp = make_cuFloatComplex(reSmat[ty][k], imSmat[ty][k]);
dtmp = make_cuDoubleComplex(reCmat[k][tx], imCmat[k][tx]);
hipDoubleComplex dtmp2 = cuComplexFloatToDouble( ftmp );
accum = cuCfma(dtmp2, dtmp, accum);
}
__syncthreads();
}
int idx = BLOCK_SIZE * by + bldm * BLOCK_SIZE * bx;
outBuff[idx + bldm * tx + ty] = cuComplexDoubleToFloat( accum );
return;
}
#endif
void sMM_v2(void *outBuff, const int bldm, void *sMat, const int srows, const int scols, const int sldm, void *cMat, const int crows, const int ccols, const int cldm)
{
#ifdef MAGMA_LIB
// for test only:
if(scols != crows) errorQuda("\nError: wrong dimensions\n");
const int block_size = 16;
if (ccols % block_size != 0) errorQuda("\nError: wrong dimensions\n");
// Setup execution parameters (column-major format):
dim3 threads(block_size, block_size);
dim3 grid((srows+15) / threads.x, ccols / threads.y);//both ccols and srows must be multiple of block_size...
hipFuncSetCacheConfig( SMatCMatCuda_16x16, hipFuncCachePreferShared );
hipLaunchKernelGGL(( SMatCMatCuda_16x16), dim3(grid), dim3(threads) , 0, 0, (cuFloatComplex*)outBuff, bldm, (cuFloatComplex*)sMat, sldm, (hipDoubleComplex*)cMat, cldm, scols);
#endif
}
#undef BLOCK_SIZE
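// Note (hedged, not from the original source): sMM_v2 wraps the kernel above as a
// mixed-precision GEMM, out(float) = sMat(float) * cMat(double), accumulating in
// double precision inside 16x16 shared-memory tiles. The launch configuration
// requires ccols to be a multiple of 16 (it errors out otherwise) and rounds the row
// dimension of the grid up to the next multiple of 16.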
void BlasMagmaArgs::OpenMagma(){
#ifdef MAGMA_LIB
magma_int_t err = magma_init();
if(err != MAGMA_SUCCESS) errorQuda("\nError: cannot initialize MAGMA library\n");
int major, minor, micro;
    magma_version( &major, &minor, &micro);
printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor);
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
void BlasMagmaArgs::CloseMagma(){
#ifdef MAGMA_LIB
if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n");
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int prec) : m(0), max_nev(0), prec(prec), ldm(0), info(-1), llwork(0),
lrwork(0), liwork(0), sideLR(0), htsize(0), dtsize(0), lwork_max(0), W(0), W2(0),
hTau(0), dTau(0), lwork(0), rwork(0), iwork(0)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
alloc = false;
init = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int m, const int ldm, const int prec)
: m(m), max_nev(0), prec(prec), ldm(ldm), info(-1), sideLR(0), htsize(0), dtsize(0),
W(0), hTau(0), dTau(0)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
const int complex_prec = 2*prec;
magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(m) : magma_get_zhetrd_nb(m);//ldm
llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm
lrwork = 1 + 5*m + 2*m*m;//ldm
liwork = 3 + 5*m;//ldm
magma_malloc_pinned((void**)&W2, ldm*m*complex_prec);
magma_malloc_pinned((void**)&lwork, llwork*complex_prec);
magma_malloc_cpu((void**)&rwork, lrwork*prec);
magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t));
init = true;
alloc = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int m, const int max_nev, const int ldm, const int prec)
: m(m), max_nev(max_nev), prec(prec), ldm(ldm), info(-1)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is intialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
const int complex_prec = 2*prec;
magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(ldm) : magma_get_zhetrd_nb(ldm);//ldm<-m
magma_int_t nbqrf = prec == 4 ? magma_get_cgeqrf_nb(ldm) : magma_get_zgeqrf_nb(ldm);//ldm
htsize = max_nev;//MIN(l,k)-number of Householder vectors, but we always have k <= MIN(m,n)
dtsize = ( 2*htsize + ((htsize + 31)/32)*32 )*nbqrf;//in general: MIN(m,k) for side = 'L' and MIN(n,k) for side = 'R'
magma_malloc_pinned((void**)&hTau, htsize*complex_prec);
magma_malloc((void**)&dTau, dtsize*complex_prec);
//these are needed for the eigCG solver only.
sideLR = (m - max_nev + nbqrf)*(m + nbqrf) + m*nbqrf;//ldm
magma_malloc_pinned((void**)&W, sideLR*complex_prec);
magma_malloc_pinned((void**)&W2, ldm*m*complex_prec);
llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm
lrwork = 1 + 5*m + 2*m*m;//ldm
liwork = 3 + 5*m;//ldm
magma_malloc_pinned((void**)&lwork, llwork*complex_prec);
magma_malloc_cpu((void**)&rwork, lrwork*prec);
magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t));
init = true;
alloc = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::~BlasMagmaArgs()
{
#ifdef MAGMA_LIB
if(alloc == true)
{
if(dTau) magma_free(dTau);
if(hTau) magma_free_pinned(hTau);
if(W) magma_free_pinned(W);
if(W2) magma_free_pinned(W2);
if(lwork) magma_free_pinned(lwork);
if(rwork) magma_free_cpu(rwork);
if(iwork) magma_free_cpu(iwork);
alloc = false;
}
init = false;
#endif
return;
}
void BlasMagmaArgs::MagmaHEEVD(void *dTvecm, void *hTvalm, const int prob_size, bool host)
{
#ifdef MAGMA_LIB
if(prob_size > m) errorQuda("\nError in MagmaHEEVD (problem size cannot exceed given search space %d), exit ...\n", m);
hipPointerAttribute_t ptr_attr;
if(!host)
{
//check if dTvecm is a device pointer..
hipPointerGetAttributes(&ptr_attr, dTvecm);
if(ptr_attr.memoryType != hipMemoryTypeDevice || ptr_attr.devicePointer == NULL ) errorQuda("Error in MagmaHEEVD, no device pointer found.");
if(prec == 4)
{
magma_cheevd_gpu(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)W2, ldm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n");
}
else
{
magma_zheevd_gpu(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)W2, ldm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n");
}
}
else
{
//check if dTvecm is a device pointer..
hipPointerGetAttributes(&ptr_attr, dTvecm);
if(ptr_attr.memoryType != hipMemoryTypeHost || ptr_attr.hostPointer == NULL ) errorQuda("Error in MagmaHEEVD, no host pointer found.");
if(prec == 4)
{
magma_cheevd(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n");
}
else
{
magma_zheevd(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n");
}
}
#endif
return;
}
int BlasMagmaArgs::MagmaORTH_2nev(void *dTvecm, void *dTm)
{
const int l = max_nev;
#ifdef MAGMA_LIB
if(prec == 4)
{
magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm
magma_cgeqrf_gpu(m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTau, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cgeqrf_gpu), exit ...\n");
//compute dTevecm0=QHTmQ
//get TQ product:
magma_cunmqr_gpu(_cR, _cN, m, m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n");
//get QHT product:
magma_cunmqr_gpu(_cL, _cC, m, l, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n");
}
else
{
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
magma_zgeqrf_gpu(m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTau, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zgeqrf_gpu), exit ...\n");
//compute dTevecm0=QHTmQ
//get TQ product:
magma_zunmqr_gpu(_cR, _cN, m, m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
//get QHT product:
magma_zunmqr_gpu(_cL, _cC, m, l, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
}
#endif
return l;
}
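// Note (hedged, not from the original source): MagmaORTH_2nev implements the eigCG
// restart projection. The first l = max_nev Ritz vectors held in dTvecm are
// orthonormalised in place by a QR factorisation (dTvecm = Q R), and dTm is then
// overwritten with the projected matrix Q^H * T * Q via the two UNMQR applications
// (T*Q from the right, then Q^H from the left).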
void BlasMagmaArgs::RestartV(void *dV, const int vld, const int vlen, const int vprec, void *dTevecm, void *dTm)
{
#ifdef MAGMA_LIB
if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n");
const int cvprec = 2*vprec;
const int l = max_nev;
//int bufferSize = 2*vld+l*l;
//int bufferBlock = bufferSize / l;
int bufferBlock = (2*vld) / l;
bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size
int bufferSize = (bufferBlock * l);
void *buffer = 0;
magma_malloc(&buffer, bufferSize*cvprec);
hipMemset(buffer, 0, bufferSize*cvprec);
if(prec == 4)
{
magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm
magma_cunmqr_gpu(_cL, _cN, m, l, l, (magmaFloatComplex*)dTevecm, ldm, (magmaFloatComplex*)hTau, (magmaFloatComplex*)dTm, ldm, (magmaFloatComplex*)W, sideLR, (magmaFloatComplex*)dTau, nb, &info);
if(info != 0) errorQuda("\nError in RestartV (magma_cunmqr_gpu), exit ...\n");
}
else
{
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
magma_zunmqr_gpu(_cL, _cN, m, l, l, (magmaDoubleComplex*)dTevecm, ldm, (magmaDoubleComplex*)hTau, (magmaDoubleComplex*)dTm, ldm, (magmaDoubleComplex*)W, sideLR, (magmaDoubleComplex*)dTau, nb, &info);
if(info != 0) errorQuda("\nError in RestartV (magma_zunmqr_gpu), exit ...\n");
}
if(vprec == 4)
{
if(prec == vprec) errorQuda("\nError: option is not currently supported, exit ...\n");
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]);
sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, m, vld, dTm, m, l, ldm);
hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault);
}
}
else
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]);
magmablas_zgemm(_cN, _cN, bufferBlock, l, m, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dTm, ldm, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock);
hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault);
}
}
magma_free(buffer);
#endif
return;
}
void BlasMagmaArgs::SolveProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH)
{
#ifdef MAGMA_LIB
const int complex_prec = 2*prec;
void *tmp;
magma_int_t *ipiv;
magma_int_t err;
magma_malloc_pinned((void**)&tmp, ldH*n*complex_prec);
magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t));
memcpy(tmp, H, ldH*n*complex_prec);
if (prec == 4)
{
err = magma_cgesv(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_cgesv), exit ...\n");
}
else
{
err = magma_zgesv(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_zgesv), exit ...\n");
}
magma_free_pinned(tmp);
magma_free_pinned(ipiv);
#endif
return;
}
void BlasMagmaArgs::SolveGPUProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH)
{
#ifdef MAGMA_LIB
const int complex_prec = 2*prec;
void *tmp;
magma_int_t *ipiv;
magma_int_t err;
magma_malloc((void**)&tmp, ldH*n*complex_prec);
magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t));
qudaMemcpy(tmp, H, ldH*n*complex_prec, hipMemcpyDefault);
if (prec == 4)
{
err = magma_cgesv_gpu(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n");
}
else
{
err = magma_zgesv_gpu(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n");
}
magma_free(tmp);
magma_free_pinned(ipiv);
#endif
return;
}
void BlasMagmaArgs::SpinorMatVec
(void *spinorOut, const void *spinorSetIn, const int sld, const int slen, const void *vec, const int vlen)
{
#ifdef MAGMA_LIB
if (prec == 4)
{
magmaFloatComplex *spmat = (magmaFloatComplex*)spinorSetIn;
magmaFloatComplex *spout = (magmaFloatComplex*)spinorOut;
magmablas_cgemv(_cN, slen, vlen, MAGMA_C_ONE, spmat, sld, (magmaFloatComplex*)vec, 1, MAGMA_C_ZERO, spout, 1);//in colour-major format
}
else
{
magmaDoubleComplex *spmat = (magmaDoubleComplex*)spinorSetIn;
magmaDoubleComplex *spout = (magmaDoubleComplex*)spinorOut;
magmablas_zgemv(_cN, slen, vlen, MAGMA_Z_ONE, spmat, sld, (magmaDoubleComplex*)vec, 1, MAGMA_Z_ZERO, spout, 1);//in colour-major format
}
#endif
return;
}
void BlasMagmaArgs::MagmaRightNotrUNMQR(const int clen, const int qrlen, const int nrefls, void *QR, const int ldqr, void *Vm, const int cldn)
{
#ifdef MAGMA_LIB
magma_int_t m = clen;
magma_int_t n = qrlen;
magma_int_t k = nrefls;
magma_int_t lwork = -1;
if(prec == 4)
{
}
else
{
magmaDoubleComplex *dQR = NULL;
magmaDoubleComplex *dtau = NULL;
magmaDoubleComplex *htau = NULL;
magmaDoubleComplex *hW = NULL;
magmaDoubleComplex qW;
magma_malloc_pinned((void**)&dQR, ldqr*k*sizeof(magmaDoubleComplex));
magma_malloc_pinned((void**)&htau, k*sizeof(magmaDoubleComplex));
//
       magma_malloc((void**)&dtau, k*sizeof(magmaDoubleComplex)); // allocate the local dtau passed to the geqrf/unmqr calls below
qudaMemcpy(dQR, QR, ldqr*k*sizeof(magmaDoubleComplex), hipMemcpyDefault);
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
//
magma_zgeqrf_gpu(n, k, (magmaDoubleComplex *)dQR, ldqr, (magmaDoubleComplex *)htau, (magmaDoubleComplex *)dtau, &info);//identical to zgeqrf?
magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, &qW, lwork, dtau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
lwork = (magma_int_t) MAGMA_Z_REAL(qW);
magma_malloc_cpu((void**)&hW, lwork*sizeof(magmaDoubleComplex));
//get TQ product:
magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, hW, lwork, dtau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
magma_free_cpu(hW);
magma_free(dtau);
magma_free_pinned(htau);
magma_free_pinned(dQR);
}
#endif
return;
}
//STL based version:
//
struct SortedEval{
double eval_nrm;
int eval_idx;
SortedEval(double val, int idx) : eval_nrm(val), eval_idx(idx) {};
};
bool cmp_eigen_nrms (SortedEval v1, SortedEval v2)
{
return (v1.eval_nrm < v2.eval_nrm);
}
void BlasMagmaArgs::Sort(const int m, const int ldm, void *eVecs, const int nev, void *unsorted_eVecs, void *eVals)
{
if (prec == 4) errorQuda("\nSingle precision is currently not supported.\n");
std::vector<SortedEval> sorted_evals_cntr;
for(int e = 0; e < m; e++) sorted_evals_cntr.push_back( SortedEval( abs(((std::complex<double>*)eVals)[e]), e ));
std::stable_sort(sorted_evals_cntr.begin(), sorted_evals_cntr.end(), cmp_eigen_nrms);
for(int e = 0; e < nev; e++)
{
memcpy(&(((std::complex<double>*)eVecs)[ldm*e]), &(((std::complex<double>*)unsorted_eVecs)[ldm*( sorted_evals_cntr[e].eval_idx)]), (ldm)*sizeof(std::complex<double>));
//set zero in m+1 element:
((std::complex<double>*)eVecs)[ldm*e+m] = std::complex<double>(0.0, 0.0);
}
return;
}
///NEW STUFF:
void BlasMagmaArgs::ComputeQR(const int nev, Complex * evmat, const int m, const int ldm, Complex *tau)
{
#ifdef MAGMA_LIB
magma_int_t _m = m;//matrix size
magma_int_t _nev = nev;//matrix size
magma_int_t _ldm = ldm;
//Lapack parameters:
magma_int_t info = 0;
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, &qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: MAGMA_ZGEQRF, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEQRF, info %d\n",info);
if(work) magma_free_cpu(work);
#endif
return;
}
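// Note (hedged, not from the original source): ComputeQR above uses the standard
// LAPACK/MAGMA workspace-query idiom: the first call with lwork = -1 only writes the
// optimal workspace size into qwork, and the factorisation itself happens in the
// second call once a buffer of that size has been allocated. The same pattern recurs
// in the *unmqr and *geev calls below.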
void BlasMagmaArgs::LeftConjZUNMQR(const int k /*number of reflectors*/, const int n /*number of columns of H*/, Complex *H, const int dh /*number of rows*/,
const int ldh, Complex * QR, const int ldqr, Complex *tau)//for vectors: n =1
{
#ifdef MAGMA_LIB
//Note: # rows of QR = # rows of H.
magma_int_t _h = dh;//matrix size
magma_int_t _n = n;//vector size
magma_int_t _k = k;
magma_int_t _ldh = ldh;
magma_int_t _ldqr = ldqr;
//Lapack parameters:
magma_side_t _s = _cL;//apply QR-matrix from the left
magma_trans_t _t = _cC;//conjugate
magma_int_t info = 0;
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
//Pdagger_{k+1} PrevRes
magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, &qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
if(work) magma_free_cpu(work);
#endif
return;
}
void BlasMagmaArgs::Construct_harmonic_matrix(Complex * const harmH, Complex * const conjH, const double beta2, const int m, const int ldH)
{
#ifdef MAGMA_LIB
//Lapack parameters:
magma_int_t _m = m;
//
magma_int_t _ldH = ldH;
//
magma_int_t info = 0;
//
magma_int_t I_ONE = 1;
//
magma_int_t *ipiv;
magma_malloc_cpu((void**)&ipiv, ldH*sizeof(magma_int_t));
//
//Construct H + beta*H^{-H} e_m*e_m^{T}
// 1. need to solve H^{H}y = e_m;
Complex *em = new Complex[m];
     em[m-1] = beta2;//in fact, we construct beta2*em
magma_zgesv(_m, I_ONE, (magmaDoubleComplex *)conjH, _ldH, ipiv, (magmaDoubleComplex *)em, _ldH, &info);
if( (info != 0 ) ) errorQuda( "Error: DGESV, info %d\n",info);
//make this cleaner!
//check solution:
for (int j = 0; j < m; j++)
{
Complex accum = 0.0;
for (int i = 0; i < m; i++) accum = (accum + harmH[ldH*j+i]*em[(ipiv[i])-1]);
}
// 2. Construct matrix for harmonic Ritz vectors:
// Adjust last column with KroneckerProd((H^{-H}*beta*em)=em, em^{T}=[0,....,1]):
for(int i = 0; i < m; i++) harmH[ldH*(m-1)+i] += em[i];
magma_free_cpu(ipiv);
//
delete [] em;
#endif
return;
}
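// Note (math only, hedged): assuming conjH holds H^H on entry, the routine above
// forms the harmonic-Ritz operator
//   H_harm = H + beta^2 * H^{-H} e_m e_m^T
// by solving H^H y = beta^2 e_m with ZGESV and adding y to the last column of H.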
void BlasMagmaArgs::Compute_harmonic_matrix_eigenpairs(Complex *harmH, const int m, const int ldH, Complex *vr, Complex *evalues, const int ldv)
{
#ifdef MAGMA_LIB
magma_int_t _m = m;//matrix size
magma_int_t _ldH = ldH;
magma_int_t _ldv = ldv;
//Lapack parameters:
magma_int_t info = 0;
//
magma_vec_t _r = _cV;
magma_vec_t _l = _cNV;//no left eigenvectors
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
double *rwork = NULL;
magma_malloc_cpu((void**)&rwork, 2*_m*sizeof(double));
//Get optimal work:
magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, &qwork, lwork, rwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
//now get eigenpairs:
magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, work, lwork, rwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info);
if(rwork) magma_free_cpu(rwork);
//
if(work) magma_free_cpu(work);
//
#endif
return;
}
void BlasMagmaArgs::RestartVH(void *dV, const int vlen, const int vld, const int vprec, void *sortedHarVecs, void *H, const int ldh)
{
#ifdef MAGMA_LIB
if(prec == 4)
{
errorQuda("\nError: single precision is not currently supported\n");
}
if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n");
int nev = (max_nev - 1); //(nev+1) - 1 for GMRESDR
int _m = m;//matrix size
int _k = nev;
int _kp1 = max_nev;
int _mp1 = (m+1);
int _ldm = ldh;
magma_side_t _s = _cR;//apply P-matrix from the right
magma_trans_t _t = _cN;//no left eigenvectors
int info = 0;
int lwork = -1;
Complex *work = NULL;
Complex qwork; //parameter to extract optimal size of work
const int cprec = 2*prec; //currently: sizeof(Complex)
const int cvprec = 2*vprec;
const int l = max_nev;
int lbsize = 2*((nev / 16)*16);
//const int bufferSize = 2*vld+lbsize*lbsize;
//int bufferBlock = bufferSize / lbsize;//or: lbsize = (nev+1)
int bufferBlock = (2*vld) / lbsize;
bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size
int bufferSize = (bufferBlock * lbsize);
void *buffer = NULL;
void *dQmat = NULL;
magma_malloc(&buffer, bufferSize*cvprec);
hipMemset(buffer, 0, bufferSize*cvprec);
magma_malloc(&dQmat, l*ldh*cprec);
//GPU code:
Complex *tau = new Complex[l];//nev+1 =>max_nev
Complex *Qmat = new Complex[ldh*_mp1];//need (m+1)x(m+1) matrix on input...
ComputeQR(l, (Complex*)sortedHarVecs, _mp1, ldh, tau);//lapack version
//max_nev vectors are stored in Qmat (output):
//restoreOrthVectors(Qmat, max_nev, (Complex*)sortedHarVecs, (m+1), ldh, tau);
//Load diagonal units
for(int d = 0; d < (m+1); d++) Qmat[ldh*d+d] = Complex(1.0, 0.0);
magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
//Copy (nev+1) vectors on the device:
qudaMemcpy(dQmat, Qmat, (max_nev)*ldh*cprec, hipMemcpyDefault);
if(cvprec == sizeof(magmaDoubleComplex))
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
//printfQuda("\nBuffer block : %d\n", bufferBlock);
magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]);
magmablas_zgemm(_cN, _cN, bufferBlock, l, _mp1, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dQmat, ldh, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock);
hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault);//make this async!
}
hipMemset(&(((magmaDoubleComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaDoubleComplex));//= m - nev
}
else // low precision field
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]);
sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, _mp1, vld, dQmat, _mp1, l, ldh);
hipMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, hipMemcpyDefault);
}
hipMemset(&(((magmaFloatComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaFloatComplex));//= m - nev
}
//Construct H_new = Pdagger_{k+1} \bar{H}_{m} P_{k}
//bar{H}_{m} P_{k}
lwork = -1;
magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
delete[] work;
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
//Pdagger_{k+1} PrevRes
lwork = -1;
_s = _cL;
_t = _cC;
magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
delete [] work;
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
const int len = ldh - nev-1;
for(int i = 0; i < nev; i++) memset(&(((Complex*)H)[ldh*i+nev+1]), 0, len*sizeof(Complex) );
//
memset(&(((Complex*)H)[ldh*(nev)]), 0, (m-nev)*ldh*sizeof(Complex));
delete [] work;
magma_free(buffer);
magma_free(dQmat);
delete [] Qmat;
delete [] tau ;
#endif
return;
}
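//Flop-count formulas (LAPACK convention) for complex LU factorization (GETRF) and matrix
//inversion (GETRI); used below only for the GFLOPS rates reported by BatchInvertMatrix.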
#define FMULS_GETRF(m_, n_) ( ((m_) < (n_)) \
? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) - 1. ) + (n_)) + (2. / 3.) * (m_)) \
: (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) - 1. ) + (m_)) + (2. / 3.) * (n_)) )
#define FADDS_GETRF(m_, n_) ( ((m_) < (n_)) \
? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) ) - (n_)) + (1. / 6.) * (m_)) \
: (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) ) - (m_)) + (1. / 6.) * (n_)) )
#define FLOPS_ZGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) )
#define FLOPS_CGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) )
#define FMULS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) + 0.5)) )
#define FADDS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) - 1.5)) )
#define FLOPS_ZGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) )
#define FLOPS_CGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) )
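//Inverts a batch of n x n complex matrices on the device: batched LU factorization (getrf)
//followed by batched out-of-place inversion (getri), with per-matrix info codes checked on the host.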
void BlasMagmaArgs::BatchInvertMatrix(void *Ainv_h, void* A_h, const int n, const int batch)
{
#ifdef MAGMA_LIB
printfQuda("%s with n=%d and batch=%d\n", __func__, n, batch);
magma_queue_t queue = 0;
size_t size = 2*n*n*prec*batch;
void *A_d = device_malloc(size);
void *Ainv_d = device_malloc(size);
qudaMemcpy(A_d, A_h, size, hipMemcpyHostToDevice);
magma_int_t **dipiv_array = static_cast<magma_int_t**>(device_malloc(batch*sizeof(magma_int_t*)));
magma_int_t *dipiv_tmp = static_cast<magma_int_t*>(device_malloc(batch*n*sizeof(magma_int_t)));
set_ipointer(dipiv_array, dipiv_tmp, 1, 0, 0, n, batch, queue);
magma_int_t *no_piv_array = static_cast<magma_int_t*>(safe_malloc(batch*n*sizeof(magma_int_t)));
for (int i=0; i<batch; i++) {
for (int j=0; j<n; j++) {
no_piv_array[i*n + j] = j+1;
}
}
qudaMemcpy(dipiv_tmp, no_piv_array, batch*n*sizeof(magma_int_t), hipMemcpyHostToDevice);
host_free(no_piv_array);
magma_int_t *dinfo_array = static_cast<magma_int_t*>(device_malloc(batch*sizeof(magma_int_t)));
magma_int_t *info_array = static_cast<magma_int_t*>(safe_malloc(batch*sizeof(magma_int_t)));
magma_int_t err;
// FIXME do this in pipelined fashion to reduce memory overhead.
if (prec == 4) {
magmaFloatComplex **A_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*)));
magmaFloatComplex **Ainv_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*)));
cset_pointer(A_array, static_cast<magmaFloatComplex*>(A_d), n, 0, 0, n*n, batch, queue);
cset_pointer(Ainv_array, static_cast<magmaFloatComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue);
double magma_time = magma_sync_wtime(queue);
//err = magma_cgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue);
err = magma_cgetrf_nopiv_batched(n, n, A_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_CGETRF(n,n) / magma_time);
if(err != 0) errorQuda("\nError in LU decomposition (magma_cgetrf), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
magma_time = magma_sync_wtime(queue);
err = magma_cgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_CGETRI(n) / magma_time);
if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
device_free(Ainv_array);
device_free(A_array);
} else if (prec == 8) {
magmaDoubleComplex **A_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*)));
zset_pointer(A_array, static_cast<magmaDoubleComplex*>(A_d), n, 0, 0, n*n, batch, queue);
magmaDoubleComplex **Ainv_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*)));
zset_pointer(Ainv_array, static_cast<magmaDoubleComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue);
double magma_time = magma_sync_wtime(queue);
err = magma_zgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_ZGETRF(n,n) / magma_time);
if(err != 0) errorQuda("\nError in LU decomposition (magma_zgetrf), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
magma_time = magma_sync_wtime(queue);
err = magma_zgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_ZGETRI(n) / magma_time);
if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), hipMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
device_free(Ainv_array);
device_free(A_array);
} else {
errorQuda("%s not implemented for precision=%d", __func__, prec);
}
qudaMemcpy(Ainv_h, Ainv_d, size, hipMemcpyDeviceToHost);
device_free(dipiv_tmp);
device_free(dipiv_array);
device_free(dinfo_array);
host_free(info_array);
device_free(Ainv_d);
device_free(A_d);
#endif
return;
}
#ifdef MAGMA_LIB
#undef _cV
#undef _cU
#undef _cR
#undef _cL
#undef _cC
#undef _cN
#undef _cNV
#endif
| 8487d6179f8086586a4068492de818b7e0fbc45d.cu | #include <blas_magma.h>
#include <string.h>
#include <vector>
#include <algorithm>
#include <util_quda.h>
#include <quda_internal.h>
#ifndef MAX
#define MAX(a, b) (a > b) ? a : b;
#endif
#define MAGMA_17 //default version version of the MAGMA library
#ifdef MAGMA_LIB
#include <magma.h>
#ifdef MAGMA_14
#define _cV 'V'
#define _cU 'U'
#define _cR 'R'
#define _cL 'L'
#define _cC 'C'
#define _cN 'N'
#define _cNV 'N'
#else
#define _cV MagmaVec
#define _cU MagmaUpper
#define _cR MagmaRight
#define _cL MagmaLeft
#define _cC MagmaConjTrans
#define _cN MagmaNoTrans
#define _cNV MagmaNoVec
#endif
#endif
//Column major format: Big matrix times Little matrix.
#ifdef MAGMA_LIB
//Simplified version for the above:
#define BLOCK_SIZE 16
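//Tiled 16x16 mixed-precision complex GEMM: the "big" matrix is single precision and the "little"
//matrix is double precision; products are accumulated in double and the result is stored in single precision.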
__global__ void SMatCMatCuda_16x16(cuFloatComplex *outBuff, const int bldm, cuFloatComplex *sMat, const int sldm, cuDoubleComplex *cMat, const int cldm, const int scols)
{
//block coords:
int by = blockIdx.x;
int bx = blockIdx.y;
//local coords:
int ty = threadIdx.x;
int tx = threadIdx.y;
int sBegin = BLOCK_SIZE * by;//global offset in Y-direction for the Big matrix
int sEnd = sBegin + sldm*scols - 1;//loop limit in X-direction for the Big matrix
int sStep = sldm * BLOCK_SIZE;//step in X-direction for the Big matrix
int cBegin = cldm * BLOCK_SIZE * bx;//global offset in X-direction for the Little matrix
int cStep = BLOCK_SIZE;//step in Y-direction for the Little matrix
cuDoubleComplex accum = make_cuDoubleComplex (0.0, 0.0);
cuFloatComplex ftmp;
cuDoubleComplex dtmp;
for (int s = sBegin, c = cBegin; s <= sEnd; s += sStep, c += cStep)
{
__shared__ float reSmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float imSmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double reCmat[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double imCmat[BLOCK_SIZE][BLOCK_SIZE];
ftmp = sMat[s + sldm * tx + ty];
reSmat[ty][tx] = cuCrealf(ftmp);
imSmat[ty][tx] = cuCimagf(ftmp);
dtmp = cMat[c + cldm * tx + ty];
reCmat[ty][tx] = cuCreal(dtmp);
imCmat[ty][tx] = cuCimag(dtmp);
__syncthreads();
#pragma unroll
for (int k = 0; k < BLOCK_SIZE; ++k)
{
ftmp = make_cuFloatComplex(reSmat[ty][k], imSmat[ty][k]);
dtmp = make_cuDoubleComplex(reCmat[k][tx], imCmat[k][tx]);
cuDoubleComplex dtmp2 = cuComplexFloatToDouble( ftmp );
accum = cuCfma(dtmp2, dtmp, accum);
}
__syncthreads();
}
int idx = BLOCK_SIZE * by + bldm * BLOCK_SIZE * bx;
outBuff[idx + bldm * tx + ty] = cuComplexDoubleToFloat( accum );
return;
}
#endif
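//Host launcher for the tiled kernel: checks that the inner dimensions agree and that the output
//column count is a multiple of the 16x16 tile, then dispatches SMatCMatCuda_16x16 on a 2D grid.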
void sMM_v2(void *outBuff, const int bldm, void *sMat, const int srows, const int scols, const int sldm, void *cMat, const int crows, const int ccols, const int cldm)
{
#ifdef MAGMA_LIB
// for test only:
if(scols != crows) errorQuda("\nError: wrong dimensions\n");
const int block_size = 16;
if (ccols % block_size != 0) errorQuda("\nError: wrong dimensions\n");
// Setup execution parameters (column-major format):
dim3 threads(block_size, block_size);
dim3 grid((srows+15) / threads.x, ccols / threads.y);//both ccols and srows must be multiple of block_size...
cudaFuncSetCacheConfig( SMatCMatCuda_16x16, cudaFuncCachePreferShared );
SMatCMatCuda_16x16<<< grid, threads >>>((cuFloatComplex*)outBuff, bldm, (cuFloatComplex*)sMat, sldm, (cuDoubleComplex*)cMat, cldm, scols);
#endif
}
#undef BLOCK_SIZE
void BlasMagmaArgs::OpenMagma(){
#ifdef MAGMA_LIB
magma_int_t err = magma_init();
if(err != MAGMA_SUCCESS) errorQuda("\nError: cannot initialize MAGMA library\n");
int major, minor, micro;
magma_version( &major, &minor, &micro);
printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor);
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
void BlasMagmaArgs::CloseMagma(){
#ifdef MAGMA_LIB
if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n");
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int prec) : m(0), max_nev(0), prec(prec), ldm(0), info(-1), llwork(0),
lrwork(0), liwork(0), sideLR(0), htsize(0), dtsize(0), lwork_max(0), W(0), W2(0),
hTau(0), dTau(0), lwork(0), rwork(0), iwork(0)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is initialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
alloc = false;
init = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int m, const int ldm, const int prec)
: m(m), max_nev(0), prec(prec), ldm(ldm), info(-1), sideLR(0), htsize(0), dtsize(0),
W(0), hTau(0), dTau(0)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is initialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
const int complex_prec = 2*prec;
magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(m) : magma_get_zhetrd_nb(m);//ldm
llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm
lrwork = 1 + 5*m + 2*m*m;//ldm
liwork = 3 + 5*m;//ldm
magma_malloc_pinned((void**)&W2, ldm*m*complex_prec);
magma_malloc_pinned((void**)&lwork, llwork*complex_prec);
magma_malloc_cpu((void**)&rwork, lrwork*prec);
magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t));
init = true;
alloc = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::BlasMagmaArgs(const int m, const int max_nev, const int ldm, const int prec)
: m(m), max_nev(max_nev), prec(prec), ldm(ldm), info(-1)
{
#ifdef MAGMA_LIB
magma_int_t dev_info = magma_getdevice_arch();//mostly to check whether magma is initialized...
if(dev_info == 0) exit(-1);
printfQuda("\nMAGMA will use device architecture %d.\n", dev_info);
const int complex_prec = 2*prec;
magma_int_t nbtrd = prec == 4 ? magma_get_chetrd_nb(ldm) : magma_get_zhetrd_nb(ldm);//ldm<-m
magma_int_t nbqrf = prec == 4 ? magma_get_cgeqrf_nb(ldm) : magma_get_zgeqrf_nb(ldm);//ldm
htsize = max_nev;//MIN(l,k)-number of Householder vectors, but we always have k <= MIN(m,n)
dtsize = ( 2*htsize + ((htsize + 31)/32)*32 )*nbqrf;//in general: MIN(m,k) for side = 'L' and MIN(n,k) for side = 'R'
magma_malloc_pinned((void**)&hTau, htsize*complex_prec);
magma_malloc((void**)&dTau, dtsize*complex_prec);
//these are needed for the eigCG solver only.
sideLR = (m - max_nev + nbqrf)*(m + nbqrf) + m*nbqrf;//ldm
magma_malloc_pinned((void**)&W, sideLR*complex_prec);
magma_malloc_pinned((void**)&W2, ldm*m*complex_prec);
llwork = MAX(m + m*nbtrd, 2*m + m*m);//ldm
lrwork = 1 + 5*m + 2*m*m;//ldm
liwork = 3 + 5*m;//ldm
magma_malloc_pinned((void**)&lwork, llwork*complex_prec);
magma_malloc_cpu((void**)&rwork, lrwork*prec);
magma_malloc_cpu((void**)&iwork, liwork*sizeof(magma_int_t));
init = true;
alloc = true;
#else
errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n");
#endif
return;
}
BlasMagmaArgs::~BlasMagmaArgs()
{
#ifdef MAGMA_LIB
if(alloc == true)
{
if(dTau) magma_free(dTau);
if(hTau) magma_free_pinned(hTau);
if(W) magma_free_pinned(W);
if(W2) magma_free_pinned(W2);
if(lwork) magma_free_pinned(lwork);
if(rwork) magma_free_cpu(rwork);
if(iwork) magma_free_cpu(iwork);
alloc = false;
}
init = false;
#endif
return;
}
void BlasMagmaArgs::MagmaHEEVD(void *dTvecm, void *hTvalm, const int prob_size, bool host)
{
#ifdef MAGMA_LIB
if(prob_size > m) errorQuda("\nError in MagmaHEEVD (problem size cannot exceed given search space %d), exit ...\n", m);
cudaPointerAttributes ptr_attr;
if(!host)
{
//check if dTvecm is a device pointer..
cudaPointerGetAttributes(&ptr_attr, dTvecm);
if(ptr_attr.memoryType != cudaMemoryTypeDevice || ptr_attr.devicePointer == NULL ) errorQuda("Error in MagmaHEEVD, no device pointer found.");
if(prec == 4)
{
magma_cheevd_gpu(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)W2, ldm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n");
}
else
{
magma_zheevd_gpu(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)W2, ldm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n");
}
}
else
{
//check if dTvecm is a device pointer..
cudaPointerGetAttributes(&ptr_attr, dTvecm);
if(ptr_attr.memoryType != cudaMemoryTypeHost || ptr_attr.hostPointer == NULL ) errorQuda("Error in MagmaHEEVD, no host pointer found.");
if(prec == 4)
{
magma_cheevd(_cV, _cU, prob_size, (magmaFloatComplex*)dTvecm, ldm, (float*)hTvalm, (magmaFloatComplex*)lwork, llwork, (float*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_cheevd_gpu), exit ...\n");
}
else
{
magma_zheevd(_cV, _cU, prob_size, (magmaDoubleComplex*)dTvecm, ldm, (double*)hTvalm, (magmaDoubleComplex*)lwork, llwork, (double*)rwork, lrwork, iwork, liwork, &info);
if(info != 0) errorQuda("\nError in MagmaHEEVD (magma_zheevd_gpu), exit ...\n");
}
}
#endif
return;
}
int BlasMagmaArgs::MagmaORTH_2nev(void *dTvecm, void *dTm)
{
const int l = max_nev;
#ifdef MAGMA_LIB
if(prec == 4)
{
magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm
magma_cgeqrf_gpu(m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTau, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cgeqrf_gpu), exit ...\n");
//compute dTevecm0=QHTmQ
//get TQ product:
magma_cunmqr_gpu(_cR, _cN, m, m, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n");
//get QHT product:
magma_cunmqr_gpu(_cL, _cC, m, l, l, (magmaFloatComplex *)dTvecm, ldm, (magmaFloatComplex *)hTau, (magmaFloatComplex *)dTm, ldm, (magmaFloatComplex *)W, sideLR, (magmaFloatComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_cunmqr_gpu), exit ...\n");
}
else
{
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
magma_zgeqrf_gpu(m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTau, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zgeqrf_gpu), exit ...\n");
//compute dTevecm0=QHTmQ
//get TQ product:
magma_zunmqr_gpu(_cR, _cN, m, m, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
//get QHT product:
magma_zunmqr_gpu(_cL, _cC, m, l, l, (magmaDoubleComplex *)dTvecm, ldm, (magmaDoubleComplex *)hTau, (magmaDoubleComplex *)dTm, ldm, (magmaDoubleComplex *)W, sideLR, (magmaDoubleComplex *)dTau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
}
#endif
return l;
}
void BlasMagmaArgs::RestartV(void *dV, const int vld, const int vlen, const int vprec, void *dTevecm, void *dTm)
{
#ifdef MAGMA_LIB
if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n");
const int cvprec = 2*vprec;
const int l = max_nev;
//int bufferSize = 2*vld+l*l;
//int bufferBlock = bufferSize / l;
int bufferBlock = (2*vld) / l;
bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size
int bufferSize = (bufferBlock * l);
void *buffer = 0;
magma_malloc(&buffer, bufferSize*cvprec);
cudaMemset(buffer, 0, bufferSize*cvprec);
if(prec == 4)
{
magma_int_t nb = magma_get_cgeqrf_nb(m);//ldm
magma_cunmqr_gpu(_cL, _cN, m, l, l, (magmaFloatComplex*)dTevecm, ldm, (magmaFloatComplex*)hTau, (magmaFloatComplex*)dTm, ldm, (magmaFloatComplex*)W, sideLR, (magmaFloatComplex*)dTau, nb, &info);
if(info != 0) errorQuda("\nError in RestartV (magma_cunmqr_gpu), exit ...\n");
}
else
{
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
magma_zunmqr_gpu(_cL, _cN, m, l, l, (magmaDoubleComplex*)dTevecm, ldm, (magmaDoubleComplex*)hTau, (magmaDoubleComplex*)dTm, ldm, (magmaDoubleComplex*)W, sideLR, (magmaDoubleComplex*)dTau, nb, &info);
if(info != 0) errorQuda("\nError in RestartV (magma_zunmqr_gpu), exit ...\n");
}
if(vprec == 4)
{
if(prec == vprec) errorQuda("\nError: option is not currently supported, exit ...\n");
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]);
sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, m, vld, dTm, m, l, ldm);
cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault);
}
}
else
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]);
magmablas_zgemm(_cN, _cN, bufferBlock, l, m, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dTm, ldm, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock);
cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault);
}
}
magma_free(buffer);
#endif
return;
}
void BlasMagmaArgs::SolveProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH)
{
#ifdef MAGMA_LIB
const int complex_prec = 2*prec;
void *tmp;
magma_int_t *ipiv;
magma_int_t err;
magma_malloc_pinned((void**)&tmp, ldH*n*complex_prec);
magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t));
memcpy(tmp, H, ldH*n*complex_prec);
if (prec == 4)
{
err = magma_cgesv(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_cgesv), exit ...\n");
}
else
{
err = magma_zgesv(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveProjMatrix (magma_zgesv), exit ...\n");
}
magma_free_pinned(tmp);
magma_free_pinned(ipiv);
#endif
return;
}
void BlasMagmaArgs::SolveGPUProjMatrix(void* rhs, const int ldn, const int n, void* H, const int ldH)
{
#ifdef MAGMA_LIB
const int complex_prec = 2*prec;
void *tmp;
magma_int_t *ipiv;
magma_int_t err;
magma_malloc((void**)&tmp, ldH*n*complex_prec);
magma_malloc_pinned((void**)&ipiv, n*sizeof(magma_int_t));
qudaMemcpy(tmp, H, ldH*n*complex_prec, cudaMemcpyDefault);
if (prec == 4)
{
err = magma_cgesv_gpu(n, 1, (magmaFloatComplex*)tmp, ldH, ipiv, (magmaFloatComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n");
}
else
{
err = magma_zgesv_gpu(n, 1, (magmaDoubleComplex*)tmp, ldH, ipiv, (magmaDoubleComplex*)rhs, ldn, &info);
if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n");
}
magma_free(tmp);
magma_free_pinned(ipiv);
#endif
return;
}
void BlasMagmaArgs::SpinorMatVec
(void *spinorOut, const void *spinorSetIn, const int sld, const int slen, const void *vec, const int vlen)
{
#ifdef MAGMA_LIB
if (prec == 4)
{
magmaFloatComplex *spmat = (magmaFloatComplex*)spinorSetIn;
magmaFloatComplex *spout = (magmaFloatComplex*)spinorOut;
magmablas_cgemv(_cN, slen, vlen, MAGMA_C_ONE, spmat, sld, (magmaFloatComplex*)vec, 1, MAGMA_C_ZERO, spout, 1);//in colour-major format
}
else
{
magmaDoubleComplex *spmat = (magmaDoubleComplex*)spinorSetIn;
magmaDoubleComplex *spout = (magmaDoubleComplex*)spinorOut;
magmablas_zgemv(_cN, slen, vlen, MAGMA_Z_ONE, spmat, sld, (magmaDoubleComplex*)vec, 1, MAGMA_Z_ZERO, spout, 1);//in colour-major format
}
#endif
return;
}
void BlasMagmaArgs::MagmaRightNotrUNMQR(const int clen, const int qrlen, const int nrefls, void *QR, const int ldqr, void *Vm, const int cldn)
{
#ifdef MAGMA_LIB
magma_int_t m = clen;
magma_int_t n = qrlen;
magma_int_t k = nrefls;
magma_int_t lwork = -1;
if(prec == 4)
{
}
else
{
magmaDoubleComplex *dQR = NULL;
magmaDoubleComplex *dtau = NULL;
magmaDoubleComplex *htau = NULL;
magmaDoubleComplex *hW = NULL;
magmaDoubleComplex qW;
magma_malloc_pinned((void**)&dQR, ldqr*k*sizeof(magmaDoubleComplex));
magma_malloc_pinned((void**)&htau, k*sizeof(magmaDoubleComplex));
//
magma_malloc((void**)&dtau, k*sizeof(magmaDoubleComplex));//allocate the local dtau used by the QR calls below
qudaMemcpy(dQR, QR, ldqr*k*sizeof(magmaDoubleComplex), cudaMemcpyDefault);
magma_int_t nb = magma_get_zgeqrf_nb(m);//ldm
//
magma_zgeqrf_gpu(n, k, (magmaDoubleComplex *)dQR, ldqr, (magmaDoubleComplex *)htau, (magmaDoubleComplex *)dtau, &info);//identical to zgeqrf?
magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, &qW, lwork, dtau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
lwork = (magma_int_t) MAGMA_Z_REAL(qW);
magma_malloc_cpu((void**)&hW, lwork*sizeof(magmaDoubleComplex));
//get TQ product:
magma_zunmqr_gpu(_cR, _cN, m, n, k, dQR, ldqr, htau, (magmaDoubleComplex *)Vm, cldn, hW, lwork, dtau, nb, &info);
if(info != 0) errorQuda("\nError in MagmaORTH_2nev (magma_zunmqr_gpu), exit ...\n");
magma_free_cpu(hW);
magma_free(dtau);
magma_free_pinned(htau);
magma_free_pinned(dQR);
}
#endif
return;
}
//STL based version:
//
struct SortedEval{
double eval_nrm;
int eval_idx;
SortedEval(double val, int idx) : eval_nrm(val), eval_idx(idx) {};
};
bool cmp_eigen_nrms (SortedEval v1, SortedEval v2)
{
return (v1.eval_nrm < v2.eval_nrm);
}
void BlasMagmaArgs::Sort(const int m, const int ldm, void *eVecs, const int nev, void *unsorted_eVecs, void *eVals)
{
if (prec == 4) errorQuda("\nSingle precision is currently not supported.\n");
std::vector<SortedEval> sorted_evals_cntr;
for(int e = 0; e < m; e++) sorted_evals_cntr.push_back( SortedEval( abs(((std::complex<double>*)eVals)[e]), e ));
std::stable_sort(sorted_evals_cntr.begin(), sorted_evals_cntr.end(), cmp_eigen_nrms);
for(int e = 0; e < nev; e++)
{
memcpy(&(((std::complex<double>*)eVecs)[ldm*e]), &(((std::complex<double>*)unsorted_eVecs)[ldm*( sorted_evals_cntr[e].eval_idx)]), (ldm)*sizeof(std::complex<double>));
//set zero in m+1 element:
((std::complex<double>*)eVecs)[ldm*e+m] = std::complex<double>(0.0, 0.0);
}
return;
}
///NEW STUFF:
void BlasMagmaArgs::ComputeQR(const int nev, Complex * evmat, const int m, const int ldm, Complex *tau)
{
#ifdef MAGMA_LIB
magma_int_t _m = m;//matrix size
magma_int_t _nev = nev;//matrix size
magma_int_t _ldm = ldm;
//Lapack parameters:
magma_int_t info = 0;
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, &qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: MAGMA_ZGEQRF, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
magma_zgeqrf(_m, _nev, (magmaDoubleComplex *)evmat, _ldm, (magmaDoubleComplex *)tau, work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEQRF, info %d\n",info);
if(work) magma_free_cpu(work);
#endif
return;
}
void BlasMagmaArgs::LeftConjZUNMQR(const int k /*number of reflectors*/, const int n /*number of columns of H*/, Complex *H, const int dh /*number of rows*/,
const int ldh, Complex * QR, const int ldqr, Complex *tau)//for vectors: n =1
{
#ifdef MAGMA_LIB
//Note: # rows of QR = # rows of H.
magma_int_t _h = dh;//matrix size
magma_int_t _n = n;//vector size
magma_int_t _k = k;
magma_int_t _ldh = ldh;
magma_int_t _ldqr = ldqr;
//Lapack parameters:
magma_side_t _s = _cL;//apply QR-matrix from the left
magma_trans_t _t = _cC;//conjugate
magma_int_t info = 0;
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
//Pdagger_{k+1} PrevRes
magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, &qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
magma_zunmqr(_s, _t, _h, _n, _k, (magmaDoubleComplex *)QR, _ldqr, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldh, work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
if(work) magma_free_cpu(work);
#endif
return;
}
void BlasMagmaArgs::Construct_harmonic_matrix(Complex * const harmH, Complex * const conjH, const double beta2, const int m, const int ldH)
{
#ifdef MAGMA_LIB
//Lapack parameters:
magma_int_t _m = m;
//
magma_int_t _ldH = ldH;
//
magma_int_t info = 0;
//
magma_int_t I_ONE = 1;
//
magma_int_t *ipiv;
magma_malloc_cpu((void**)&ipiv, ldH*sizeof(magma_int_t));
//
//Construct H + beta*H^{-H} e_m*e_m^{T}
// 1. need to solve H^{H}y = e_m;
Complex *em = new Complex[m];
em[m-1] = beta2;//in fact, we construct beta*em,
magma_zgesv(_m, I_ONE, (magmaDoubleComplex *)conjH, _ldH, ipiv, (magmaDoubleComplex *)em, _ldH, &info);
if( (info != 0 ) ) errorQuda( "Error: DGESV, info %d\n",info);
//make this cleaner!
//check solution:
for (int j = 0; j < m; j++)
{
Complex accum = 0.0;
for (int i = 0; i < m; i++) accum = (accum + harmH[ldH*j+i]*em[(ipiv[i])-1]);
}
// 2. Construct matrix for harmonic Ritz vectors:
// Adjust last column with KroneckerProd((H^{-H}*beta*em)=em, em^{T}=[0,....,1]):
for(int i = 0; i < m; i++) harmH[ldH*(m-1)+i] += em[i];
magma_free_cpu(ipiv);
//
delete [] em;
#endif
return;
}
void BlasMagmaArgs::Compute_harmonic_matrix_eigenpairs(Complex *harmH, const int m, const int ldH, Complex *vr, Complex *evalues, const int ldv)
{
#ifdef MAGMA_LIB
magma_int_t _m = m;//matrix size
magma_int_t _ldH = ldH;
magma_int_t _ldv = ldv;
//Lapack parameters:
magma_int_t info = 0;
//
magma_vec_t _r = _cV;
magma_vec_t _l = _cNV;//no left eigenvectors
magma_int_t lwork = -1;
magmaDoubleComplex *work = NULL;
magmaDoubleComplex qwork; //parameter to extract optimal size of work
double *rwork = NULL;
magma_malloc_cpu((void**)&rwork, 2*_m*sizeof(double));
//Get optimal work:
magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, &qwork, lwork, rwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info);
lwork = (magma_int_t) MAGMA_Z_REAL(qwork);
magma_malloc_cpu((void**)&work, lwork*sizeof(magmaDoubleComplex));
//now get eigenpairs:
magma_zgeev(_l, _r, _m, (magmaDoubleComplex *)harmH, _ldH, (magmaDoubleComplex *)evalues, NULL, _ldv, (magmaDoubleComplex *)vr, _ldv, work, lwork, rwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZGEEVX, info %d\n",info);
if(rwork) magma_free_cpu(rwork);
//
if(work) magma_free_cpu(work);
//
#endif
return;
}
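//Restart routine for GMRES-DR: forms a QR factorization of the sorted harmonic Ritz vectors,
//applies the resulting orthogonal factor to the search-space vectors dV in device-side blocks,
//and compresses the projection matrix to H_new = Pdagger_{k+1} \bar{H}_{m} P_{k}.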
void BlasMagmaArgs::RestartVH(void *dV, const int vlen, const int vld, const int vprec, void *sortedHarVecs, void *H, const int ldh)
{
#ifdef MAGMA_LIB
if(prec == 4)
{
errorQuda("\nError: single precision is not currently supported\n");
}
if( (vld % 32) != 0) errorQuda("\nError: leading dimension must be multiple of the warp size\n");
int nev = (max_nev - 1); //(nev+1) - 1 for GMRESDR
int _m = m;//matrix size
int _k = nev;
int _kp1 = max_nev;
int _mp1 = (m+1);
int _ldm = ldh;
magma_side_t _s = _cR;//apply P-matrix from the right
magma_trans_t _t = _cN;//no (conjugate) transpose
int info = 0;
int lwork = -1;
Complex *work = NULL;
Complex qwork; //parameter to extract optimal size of work
const int cprec = 2*prec; //currently: sizeof(Complex)
const int cvprec = 2*vprec;
const int l = max_nev;
int lbsize = 2*((nev / 16)*16);
//const int bufferSize = 2*vld+lbsize*lbsize;
//int bufferBlock = bufferSize / lbsize;//or: lbsize = (nev+1)
int bufferBlock = (2*vld) / lbsize;
bufferBlock = (bufferBlock / 32) * 32;//corrected bufferBlock to be multiple of the warp size
int bufferSize = (bufferBlock * lbsize);
void *buffer = NULL;
void *dQmat = NULL;
magma_malloc(&buffer, bufferSize*cvprec);
cudaMemset(buffer, 0, bufferSize*cvprec);
magma_malloc(&dQmat, l*ldh*cprec);
//GPU code:
Complex *tau = new Complex[l];//nev+1 =>max_nev
Complex *Qmat = new Complex[ldh*_mp1];//need (m+1)x(m+1) matrix on input...
ComputeQR(l, (Complex*)sortedHarVecs, _mp1, ldh, tau);//lapack version
//max_nev vectors are stored in Qmat (output):
//restoreOrthVectors(Qmat, max_nev, (Complex*)sortedHarVecs, (m+1), ldh, tau);
//Load diagonal units
for(int d = 0; d < (m+1); d++) Qmat[ldh*d+d] = Complex(1.0, 0.0);
magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _mp1, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)Qmat, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
//Copy (nev+1) vectors on the device:
qudaMemcpy(dQmat, Qmat, (max_nev)*ldh*cprec, cudaMemcpyDefault);
if(cvprec == sizeof(magmaDoubleComplex))
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
//printfQuda("\nBuffer block : %d\n", bufferBlock);
magmaDoubleComplex *ptrV = &(((magmaDoubleComplex*)dV)[blockOffset]);
magmablas_zgemm(_cN, _cN, bufferBlock, l, _mp1, MAGMA_Z_ONE, ptrV, vld, (magmaDoubleComplex*)dQmat, ldh, MAGMA_Z_ZERO, (magmaDoubleComplex*)buffer, bufferBlock);
cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault);//make this async!
}
cudaMemset(&(((magmaDoubleComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaDoubleComplex));//= m - nev
}
else // low precision field
{
for (int blockOffset = 0; blockOffset < vlen; blockOffset += bufferBlock)
{
if (bufferBlock > (vlen-blockOffset)) bufferBlock = (vlen-blockOffset);
magmaFloatComplex *ptrV = &(((magmaFloatComplex*)dV)[blockOffset]);
sMM_v2(buffer, bufferBlock, ptrV, bufferBlock, _mp1, vld, dQmat, _mp1, l, ldh);
cudaMemcpy2D(ptrV, vld*cvprec, buffer, bufferBlock*cvprec, bufferBlock*cvprec, l, cudaMemcpyDefault);
}
cudaMemset(&(((magmaFloatComplex*)dV)[vld*max_nev]), 0, (m+1-max_nev)*vld*sizeof(magmaFloatComplex));//= m - nev
}
//Construct H_new = Pdagger_{k+1} \bar{H}_{m} P_{k}
//bar{H}_{m} P_{k}
lwork = -1;
magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
delete[] work;
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _m, _k, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
//Pdagger_{k+1} PrevRes
lwork = -1;
_s = _cL;
_t = _cC;
magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)&qwork, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
delete [] work;
lwork = (int) qwork.real();
work = new Complex[lwork];
magma_zunmqr(_s, _t, _mp1, _k, _kp1, (magmaDoubleComplex *)sortedHarVecs, _ldm, (magmaDoubleComplex *)tau, (magmaDoubleComplex *)H, _ldm, (magmaDoubleComplex *)work, lwork, &info);
if( (info != 0 ) ) errorQuda( "Error: ZUNMQR, info %d\n",info);
const int len = ldh - nev-1;
for(int i = 0; i < nev; i++) memset(&(((Complex*)H)[ldh*i+nev+1]), 0, len*sizeof(Complex) );
//
memset(&(((Complex*)H)[ldh*(nev)]), 0, (m-nev)*ldh*sizeof(Complex));
delete [] work;
magma_free(buffer);
magma_free(dQmat);
delete [] Qmat;
delete [] tau ;
#endif
return;
}
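//Flop-count formulas (LAPACK convention) for complex LU factorization (GETRF) and matrix
//inversion (GETRI); used below only for the GFLOPS rates reported by BatchInvertMatrix.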
#define FMULS_GETRF(m_, n_) ( ((m_) < (n_)) \
? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) - 1. ) + (n_)) + (2. / 3.) * (m_)) \
: (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) - 1. ) + (m_)) + (2. / 3.) * (n_)) )
#define FADDS_GETRF(m_, n_) ( ((m_) < (n_)) \
? (0.5 * (m_) * ((m_) * ((n_) - (1./3.) * (m_) ) - (n_)) + (1. / 6.) * (m_)) \
: (0.5 * (n_) * ((n_) * ((m_) - (1./3.) * (n_) ) - (m_)) + (1. / 6.) * (n_)) )
#define FLOPS_ZGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) )
#define FLOPS_CGETRF(m_, n_) (6. * FMULS_GETRF((double)(m_), (double)(n_)) + 2.0 * FADDS_GETRF((double)(m_), (double)(n_)) )
#define FMULS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) + 0.5)) )
#define FADDS_GETRI(n_) ( (n_) * ((5. / 6.) + (n_) * ((2. / 3.) * (n_) - 1.5)) )
#define FLOPS_ZGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) )
#define FLOPS_CGETRI(n_) (6. * FMULS_GETRI((double)(n_)) + 2.0 * FADDS_GETRI((double)(n_)) )
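//Inverts a batch of n x n complex matrices on the device: batched LU factorization (getrf)
//followed by batched out-of-place inversion (getri), with per-matrix info codes checked on the host.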
void BlasMagmaArgs::BatchInvertMatrix(void *Ainv_h, void* A_h, const int n, const int batch)
{
#ifdef MAGMA_LIB
printfQuda("%s with n=%d and batch=%d\n", __func__, n, batch);
magma_queue_t queue = 0;
size_t size = 2*n*n*prec*batch;
void *A_d = device_malloc(size);
void *Ainv_d = device_malloc(size);
qudaMemcpy(A_d, A_h, size, cudaMemcpyHostToDevice);
magma_int_t **dipiv_array = static_cast<magma_int_t**>(device_malloc(batch*sizeof(magma_int_t*)));
magma_int_t *dipiv_tmp = static_cast<magma_int_t*>(device_malloc(batch*n*sizeof(magma_int_t)));
set_ipointer(dipiv_array, dipiv_tmp, 1, 0, 0, n, batch, queue);
magma_int_t *no_piv_array = static_cast<magma_int_t*>(safe_malloc(batch*n*sizeof(magma_int_t)));
for (int i=0; i<batch; i++) {
for (int j=0; j<n; j++) {
no_piv_array[i*n + j] = j+1;
}
}
qudaMemcpy(dipiv_tmp, no_piv_array, batch*n*sizeof(magma_int_t), cudaMemcpyHostToDevice);
host_free(no_piv_array);
magma_int_t *dinfo_array = static_cast<magma_int_t*>(device_malloc(batch*sizeof(magma_int_t)));
magma_int_t *info_array = static_cast<magma_int_t*>(safe_malloc(batch*sizeof(magma_int_t)));
magma_int_t err;
// FIXME do this in pipelined fashion to reduce memory overhead.
if (prec == 4) {
magmaFloatComplex **A_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*)));
magmaFloatComplex **Ainv_array = static_cast<magmaFloatComplex**>(device_malloc(batch*sizeof(magmaFloatComplex*)));
cset_pointer(A_array, static_cast<magmaFloatComplex*>(A_d), n, 0, 0, n*n, batch, queue);
cset_pointer(Ainv_array, static_cast<magmaFloatComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue);
double magma_time = magma_sync_wtime(queue);
//err = magma_cgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue);
err = magma_cgetrf_nopiv_batched(n, n, A_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_CGETRF(n,n) / magma_time);
if(err != 0) errorQuda("\nError in LU decomposition (magma_cgetrf), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
magma_time = magma_sync_wtime(queue);
err = magma_cgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_CGETRI(n) / magma_time);
if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
device_free(Ainv_array);
device_free(A_array);
} else if (prec == 8) {
magmaDoubleComplex **A_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*)));
zset_pointer(A_array, static_cast<magmaDoubleComplex*>(A_d), n, 0, 0, n*n, batch, queue);
magmaDoubleComplex **Ainv_array = static_cast<magmaDoubleComplex**>(device_malloc(batch*sizeof(magmaDoubleComplex*)));
zset_pointer(Ainv_array, static_cast<magmaDoubleComplex*>(Ainv_d), n, 0, 0, n*n, batch, queue);
double magma_time = magma_sync_wtime(queue);
err = magma_zgetrf_batched(n, n, A_array, n, dipiv_array, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("LU factorization completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_ZGETRF(n,n) / magma_time);
if(err != 0) errorQuda("\nError in LU decomposition (magma_zgetrf), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
magma_time = magma_sync_wtime(queue);
err = magma_zgetri_outofplace_batched(n, A_array, n, dipiv_array, Ainv_array, n, dinfo_array, batch, queue);
magma_time = magma_sync_wtime(queue) - magma_time;
printfQuda("Matrix inversion completed in %f seconds with GFLOPS = %f\n",
magma_time, 1e-9 * batch * FLOPS_ZGETRI(n) / magma_time);
if(err != 0) errorQuda("\nError in matrix inversion (magma_cgetri), error code = %d\n", err);
qudaMemcpy(info_array, dinfo_array, batch*sizeof(magma_int_t), cudaMemcpyDeviceToHost);
for (int i=0; i<batch; i++) {
if (info_array[i] < 0) {
errorQuda("%d argument had an illegal value or another error occured, such as memory allocation failed", i);
} else if (info_array[i] > 0) {
errorQuda("%d factorization completed but the factor U is exactly singular", i);
}
}
device_free(Ainv_array);
device_free(A_array);
} else {
errorQuda("%s not implemented for precision=%d", __func__, prec);
}
qudaMemcpy(Ainv_h, Ainv_d, size, cudaMemcpyDeviceToHost);
device_free(dipiv_tmp);
device_free(dipiv_array);
device_free(dinfo_array);
host_free(info_array);
device_free(Ainv_d);
device_free(A_d);
#endif
return;
}
#ifdef MAGMA_LIB
#undef _cV
#undef _cU
#undef _cR
#undef _cL
#undef _cC
#undef _cN
#undef _cNV
#endif
|
51a608fe30539fd4f2499b99d8f252f396548b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#include <iostream>
const int N = 10;
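// Element-wise vector addition: one thread per element, launched as a single block of N threads.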
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x;
if(tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc( (void**)&dev_a, sizeof(int) * N );
hipMalloc( (void**)&dev_b, sizeof(int) * N);
hipMalloc( (void**)&dev_c, sizeof(int) * N );
for(int i = 0; i < N; i++){
a[i] = -i;
b[i] = i;
}
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1), dim3(N), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, sizeof(int) * N, hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
std::cout << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
} | 51a608fe30539fd4f2499b99d8f252f396548b55.cu | #include "../common/book.h"
#include <iostream>
const int N = 10;
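// Element-wise vector addition: one thread per element, launched as a single block of N threads.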
__global__ void add(int *a, int *b, int *c){
int tid = threadIdx.x;
if(tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc( (void**)&dev_a, sizeof(int) * N );
cudaMalloc( (void**)&dev_b, sizeof(int) * N);
cudaMalloc( (void**)&dev_c, sizeof(int) * N );
for(int i = 0; i < N; i++){
a[i] = -i;
b[i] = i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
add<<<1, N>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++)
std::cout << a[i] << " + " << b[i] << " = " << c[i] << std::endl;
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
} |
e295ec419c226f8a7cc665dc135c6fce2763ebba.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
constexpr int dim_size = 100;
template <typename T> class EigenSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<detail::MatXPybind>();
pb->InitAndRunTVGenerator<T>("00_solver", "eig", "run", {dim_size});
pb->NumpyToTensorView(Bv, "B");
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> Bv{{dim_size, dim_size}};
tensor_t<T, 2> Btv{{dim_size, dim_size}};
tensor_t<T, 2> Evv{{dim_size, dim_size}};
tensor_t<T, 2> Wv{{dim_size, 1}};
tensor_t<T, 1> Wov{{dim_size}};
tensor_t<T, 2> Gtv{{dim_size, 1}};
tensor_t<T, 2> Lvv{{dim_size, 1}};
};
template <typename TensorType>
class EigenSolverTestNonComplexFloatTypes : public EigenSolverTest<TensorType> {
};
TYPED_TEST_SUITE(EigenSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
TYPED_TEST(EigenSolverTestNonComplexFloatTypes, EigenBasic)
{
MATX_ENTER_HANDLER();
// example-begin eig-test-1
// Note that eigenvalue/vector solutions are not necessarily ordered in the same way other libraries
// may order them. When comparing against other libraries it's best to check A*v = lambda * v
(mtie(this->Evv, this->Wov) = eig(this->Bv)).run();
// example-end eig-test-1
// Now we need to go through all the eigenvectors and eigenvalues and make
// sure the results match the equation A*v = lambda*v, where v are the
// eigenvectors corresponding to the eigenvalue lambda.
for (index_t i = 0; i < dim_size; i++) {
auto v = this->Evv.template Slice<2>({0, i}, {matxEnd, i + 1});
matx::copy(this->Wv, v, 0);
// Compute lambda*v
auto b = v * this->Wov(i);
(this->Lvv = b).run();
// Compute A*v
(this->Gtv = matmul(this->Bv, this->Wv)).run();
hipStreamSynchronize(0);
// Compare
for (index_t j = 0; j < dim_size; j++) {
ASSERT_NEAR(this->Gtv(j, 0), this->Lvv(j, 0), 0.001);
}
}
MATX_EXIT_HANDLER();
}
| e295ec419c226f8a7cc665dc135c6fce2763ebba.cu | ////////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2021, NVIDIA Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/////////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#include "matx.h"
#include "test_types.h"
#include "utilities.h"
#include "gtest/gtest.h"
using namespace matx;
constexpr int dim_size = 100;
template <typename T> class EigenSolverTest : public ::testing::Test {
protected:
void SetUp() override
{
pb = std::make_unique<detail::MatXPybind>();
pb->InitAndRunTVGenerator<T>("00_solver", "eig", "run", {dim_size});
pb->NumpyToTensorView(Bv, "B");
}
void TearDown() { pb.reset(); }
std::unique_ptr<detail::MatXPybind> pb;
tensor_t<T, 2> Bv{{dim_size, dim_size}};
tensor_t<T, 2> Btv{{dim_size, dim_size}};
tensor_t<T, 2> Evv{{dim_size, dim_size}};
tensor_t<T, 2> Wv{{dim_size, 1}};
tensor_t<T, 1> Wov{{dim_size}};
tensor_t<T, 2> Gtv{{dim_size, 1}};
tensor_t<T, 2> Lvv{{dim_size, 1}};
};
template <typename TensorType>
class EigenSolverTestNonComplexFloatTypes : public EigenSolverTest<TensorType> {
};
TYPED_TEST_SUITE(EigenSolverTestNonComplexFloatTypes,
MatXFloatNonComplexNonHalfTypes);
TYPED_TEST(EigenSolverTestNonComplexFloatTypes, EigenBasic)
{
MATX_ENTER_HANDLER();
// example-begin eig-test-1
// Note that eigenvalue/vector solutions are not necessarily ordered in the same way other libraries
// may order them. When comparing against other libraries it's best to check A*v = lambda * v
(mtie(this->Evv, this->Wov) = eig(this->Bv)).run();
// example-end eig-test-1
// Now we need to go through all the eigenvectors and eigenvalues and make
// sure the results match the equation A*v = lambda*v, where v are the
// eigenvectors corresponding to the eigenvalue lambda.
for (index_t i = 0; i < dim_size; i++) {
auto v = this->Evv.template Slice<2>({0, i}, {matxEnd, i + 1});
matx::copy(this->Wv, v, 0);
// Compute lambda*v
auto b = v * this->Wov(i);
(this->Lvv = b).run();
// Compute A*v
(this->Gtv = matmul(this->Bv, this->Wv)).run();
cudaStreamSynchronize(0);
// Compare
for (index_t j = 0; j < dim_size; j++) {
ASSERT_NEAR(this->Gtv(j, 0), this->Lvv(j, 0), 0.001);
}
}
MATX_EXIT_HANDLER();
}
|
b20e25faebecad3c178c13a93c31f8b471404e22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
static inline __device__ __host__ float sigmoid(float x) {
return 1.F / (1.F + expf(-x));
}
#ifdef PADDLE_CUDA_BF16
__host__ __device__ inline float2 bfloat1622float2(const __nv_bfloat162 a) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
return __bfloat1622float2(a);
#else
float hi_float;
float lo_float;
lo_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).x);
hi_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).y);
return make_float2(lo_float, hi_float);
#endif
}
__host__ __device__ inline __nv_bfloat162 float22bfloat162_rn(const float2 a) {
__nv_bfloat162 val;
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
val = __float22bfloat162_rn(a);
#else
val.x = __float2bfloat16_rn(a.x);
val.y = __float2bfloat16_rn(a.y);
#endif
return val;
}
#endif
template <typename T>
__host__ __device__ inline float __2float(const T a) {
return static_cast<float>(a);
}
template <>
__host__ __device__ inline float __2float<__half>(const __half a) {
return __half2float(a);
}
template <typename T>
__host__ __device__ inline T __2dst(const float a) {
return static_cast<T>(a);
}
template <>
__host__ __device__ inline __half __2dst<__half>(const float a) {
return __float2half(a);
}
struct GroupSums {
// Is it the 1st element of the group?
int32_t flag;
// The sum.
float sum;
// The sum of squares.
float sumSq;
};
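// Binary operator for the segmented block scan: `flag` marks the first channel of a group, so the
// running sums restart instead of accumulating across group boundaries.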
struct GroupSumsOp {
inline __device__ GroupSums operator()(GroupSums const& a,
GroupSums const& b) {
GroupSums dst;
dst.sum = b.flag ? b.sum : (a.sum + b.sum);
dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq);
dst.flag = a.flag + b.flag;
return dst;
}
};
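// Returns the largest divisor of n that is strictly smaller than maxAllowedDivisor (-1 if none qualifies).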
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
int32_t maxDivisor = -1;
for (int32_t i = 1; i <= std::sqrt(n); i++) {
if (n % i == 0) {
int32_t divisor1 = n / i;
int32_t divisor2 = i;
if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
maxDivisor = divisor1;
}
if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
maxDivisor = divisor2;
}
}
}
return maxDivisor;
}
template <typename T, int THREADS_PER_CHANNEL>
inline __device__ void UpdateSum(const T* srcX, float* sum, float* sumSq) {
float src_data = phi::__2float<T>(*srcX);
*sum += src_data;
*sumSq += src_data * src_data;
}
template <>
inline __device__ void UpdateSum<__half, 2>(const __half* srcX,
float* sum,
float* sumSq) {
__half2 h2 = *reinterpret_cast<__half2 const*>(srcX);
float2 f2 = __half22float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
template <>
inline __device__ void UpdateSum<phi::dtype::float16, 2>(
const phi::dtype::float16* srcX, float* sum, float* sumSq) {
__half2 h2 = *reinterpret_cast<__half2 const*>(srcX);
float2 f2 = __half22float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
#ifdef PADDLE_CUDA_BF16
template <>
inline __device__ void UpdateSum<phi::dtype::bfloat16, 2>(
const phi::dtype::bfloat16* srcX, float* sum, float* sumSq) {
__nv_bfloat162 h2 = *reinterpret_cast<__nv_bfloat162 const*>(srcX);
float2 f2 = phi::bfloat1622float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
#endif
template <typename T, int THREADS_PER_BLOCK>
__global__ void groupNormNHWCSumSingerChannelKernel(
const GroupNormNHWCParams<T> params) {
// The instance in the batch.
__shared__ float2 smem[THREADS_PER_BLOCK];
int32_t ni = blockIdx.z;
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x;
if (ci >= params.c) {
return;
}
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
float src_data = *reinterpret_cast<float const*>(&params.srcX[offset]);
UpdateSum<T, 1>(&params.srcX[offset], &sum, &sumSq);
}
smem[threadIdx.x] = make_float2(sum, sumSq);
__syncthreads();
float2 sums = smem[threadIdx.x];
atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + ci],
sums.x * params.invHWC);
atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + ci], sums.y);
}
template <typename T, int THREADS_PER_BLOCK, int THREADS_PER_CHANNEL>
__global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams<T> params) {
// The object in charge of doing the sums for the different blocks.
typedef hipcub::BlockScan<GroupSums, THREADS_PER_BLOCK> BlockScan;
// Allocate shared memory for BlockScan.
__shared__ typename BlockScan::TempStorage tempStorage;
// Allocate shared memory for the groups. We could reduce the amount of shared
// memory reserved.
__shared__ float2 smem[THREADS_PER_BLOCK];
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci =
blockIdx.x * params.cPerBlock + threadIdx.x * THREADS_PER_CHANNEL;
if (ci >= params.c || threadIdx.x * THREADS_PER_CHANNEL >= params.cPerBlock) {
return;
}
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
float src_data = *reinterpret_cast<float const*>(&params.srcX[offset]);
UpdateSum<T, THREADS_PER_CHANNEL>(&params.srcX[offset], &sum, &sumSq);
}
// The group that thread works on and the channel in the group (modulus).
int32_t gi =
ci / params.cPerGroup - blockIdx.x * params.cPerBlock / params.cPerGroup;
int32_t cj = ci % params.cPerGroup;
int flag = (cj == 0 || threadIdx.x == 0) ? 1 : 0;
GroupSums inp{flag, sum, sumSq};
GroupSums out;
BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());
if (cj == params.cPerGroup - THREADS_PER_CHANNEL ||
threadIdx.x * THREADS_PER_CHANNEL ==
params.cPerBlock - THREADS_PER_CHANNEL) {
smem[gi] = make_float2(out.sum, out.sumSq);
}
__syncthreads();
int32_t gj = ci / params.cPerGroup;
if (cj == params.cPerGroup - THREADS_PER_CHANNEL ||
threadIdx.x * THREADS_PER_CHANNEL ==
params.cPerBlock - THREADS_PER_CHANNEL) {
float2 sums = smem[gi];
atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj],
sums.x * params.invHWC);
atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
}
template <typename T>
void groupNormNHWCSum<T>::operator()(GroupNormNHWCParams<T>* params,
gpuStream_t stream) {
dim3 grid;
grid.x = divUp(params->c, params->cPerBlock);
grid.y = divUp(params->hw, params->hwPerBlock);
grid.z = params->n;
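// Pick the two-channels-per-thread kernels when each group has an even channel
// count; otherwise use one channel per thread, or the single-channel kernel
// when cPerGroup == 1.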
if (params->cPerGroup % 2 == 0) {
switch (params->cPerBlock) {
case 512:
case 480:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 256, 2>), dim3(grid), dim3(256), 0, stream, *params);
break;
case 320:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 160, 2>), dim3(grid), dim3(160), 0, stream, *params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 128, 2>), dim3(grid), dim3(128), 0, stream, *params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 64, 2>), dim3(grid), dim3(64), 0, stream, *params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 64, 2>), dim3(grid), dim3(64), 0, stream, *params);
}
} else {
if (params->cPerGroup != 1) {
switch (params->cPerBlock) {
case 512:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 512, 1>), dim3(grid), dim3(512), 0, stream, *params);
break;
case 480:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 480, 1>), dim3(grid), dim3(480), 0, stream, *params);
break;
case 320:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 320, 1>), dim3(grid), dim3(320), 0, stream, *params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 256, 1>), dim3(grid), dim3(256), 0, stream, *params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 128, 1>), dim3(grid), dim3(128), 0, stream, *params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
hipLaunchKernelGGL(( groupNormNHWCSumKernel<T, 128, 1>), dim3(grid), dim3(128), 0, stream, *params);
}
} else {
switch (params->cPerBlock) {
case 512:
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 512>)
, dim3(grid), dim3(512), 0, stream, *params);
break;
case 480:
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 480>)
, dim3(grid), dim3(480), 0, stream, *params);
break;
case 320:
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 320>)
, dim3(grid), dim3(320), 0, stream, *params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 256>)
, dim3(grid), dim3(256), 0, stream, *params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 128>)
, dim3(grid), dim3(128), 0, stream, *params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
hipLaunchKernelGGL(( groupNormNHWCSumSingerChannelKernel<T, 128>)
, dim3(grid), dim3(128), 0, stream, *params);
}
}
}
}
template class groupNormNHWCSum<half>;
template <typename T, int THREADS_PER_CHANNEL>
inline __device__ void GroupNormCompute(int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<T>& params,
float mean,
float invStdDev) {
float gamma =
phi::__2float<T>(*(reinterpret_cast<T const*>(params.gamma) + ci));
float beta =
phi::__2float<T>(*(reinterpret_cast<T const*>(params.beta) + ci));
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
const float src_data = phi::__2float<T>(params.srcX[offset]);
// Normalize the channels.
float dst_data = (src_data - mean) * invStdDev;
// Scale by gamma and add beta.
dst_data = gamma * dst_data + beta;
// Apply Silu if needed.
if (params.withSilu) {
dst_data = dst_data * sigmoid(dst_data);
}
// Store the scaled values.
*reinterpret_cast<T*>(&params.dst[offset]) = phi::__2dst<T>(dst_data);
}
}
template <>
inline __device__ void GroupNormCompute<phi::dtype::float16, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<phi::dtype::float16>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2 = *reinterpret_cast<__half2 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__half2*>(&params.dst[offset]) = __float22half2_rn(f2);
}
}
template <>
inline __device__ void GroupNormCompute<__half, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<__half>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2 = *reinterpret_cast<__half2 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__half2*>(&params.dst[offset]) = __float22half2_rn(f2);
}
}
#ifdef PADDLE_CUDA_BF16
template <>
inline __device__ void GroupNormCompute<phi::dtype::bfloat16, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<phi::dtype::bfloat16>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = phi::bfloat1622float2(*reinterpret_cast<__nv_bfloat162 const*>(
reinterpret_cast<__nv_bfloat16 const*>(params.gamma) + ci));
betaF2 = phi::bfloat1622float2(*reinterpret_cast<__nv_bfloat162 const*>(
reinterpret_cast<__nv_bfloat16 const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__nv_bfloat162 h2 =
*reinterpret_cast<__nv_bfloat162 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = phi::bfloat1622float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__nv_bfloat162*>(&params.dst[offset]) =
phi::float22bfloat162_rn(f2);
}
}
#endif
template <typename T, int THREADS_PER_CHANNEL>
__global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams<T> params) {
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci =
blockIdx.x * params.cPerBlock + threadIdx.x * THREADS_PER_CHANNEL;
// The group that thread works on and the channel in the group (modulus).
int32_t gi = ci / params.cPerGroup;
if (ci >= params.c || gi >= params.groups) {
return;
}
// Load the sum and sum of squares for the group.
float mean = params.redBuffer[(2 * ni + 0) * params.groups + gi];
float sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
// Compute the variance.
float var = sumSq * params.invHWC - (mean * mean);
if (params.var_data != nullptr) {
params.var_data[ni * params.groups + gi] = var;
}
// Compute the inverse of the stddev.
float invStdDev = rsqrtf(var + params.eps);
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
GroupNormCompute<T, THREADS_PER_CHANNEL>(
hwBegin, hwEnd, ci, params, mean, invStdDev);
}
template <typename T>
void groupNormNHWCScale<T>::operator()(const GroupNormNHWCParams<T>& params,
gpuStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = divUp(params.c, params.cPerBlock);
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
if (params.cPerGroup % 2 == 0) {
switch (params.cPerBlock) {
case 512:
case 480:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 2>), dim3(grid), dim3(256), 0, stream, params);
break;
case 320:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 2>), dim3(grid), dim3(160), 0, stream, params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 2>), dim3(grid), dim3(128), 0, stream, params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 2>), dim3(grid), dim3(64), 0, stream, params);
break;
default:
grid.x = divUp(params.c, 128);
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 2>), dim3(grid), dim3(64), 0, stream, params);
}
} else {
switch (params.cPerBlock) {
case 512:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(512), 0, stream, params);
break;
case 480:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(480), 0, stream, params);
break;
case 320:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(320), 0, stream, params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(256), 0, stream, params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(128), 0, stream, params);
break;
default:
grid.x = divUp(params.c, 128);
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<T, 1>), dim3(grid), dim3(128), 0, stream, params);
}
}
}
template class groupNormNHWCScale<half>;
template <typename T, typename Context>
void GroupNormNHWCKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
GroupNormNHWCParams<T> params_;
params_.withSilu = false;
const auto x_dims = x.dims();
dev_ctx.template Alloc<T>(y);
const T* x_data = x.data<T>();
T* y_data = y->data<T>();
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
params_.n = x_dims[0];
params_.c = x_dims[3];
params_.h = x_dims[1];
params_.w = x_dims[2];
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
params_.var_data = var_data;
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
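// Choose how many channels one block covers from the total channel count;
// unusual widths fall back to 320, and the value is overridden below when it
// does not divide evenly into whole groups.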
switch (params_.c) {
case 2048:
case 1024:
cPerBlock = 512;
break;
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
params_.groups = groups;
params_.cPerGroup = params_.c / params_.groups;
if (cPerBlock % params_.cPerGroup != 0) {
cPerBlock = params_.cPerGroup;
}
params_.srcX = reinterpret_cast<const T*>(x_data);
params_.dst = reinterpret_cast<T*>(y_data);
params_.gamma = scale_data;
params_.beta = bias_data;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.hwc = params_.hw * params_.c;
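// invHWC = 1 / (elements per group per instance): the sum kernels pre-scale by
// it so redBuffer holds the group mean directly, while the sum of squares is
// only normalized later in the scale kernel.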
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.eps = epsilon;
auto stream = dev_ctx.stream();
DenseTensor redBuffer;
int buffer_sizes = 2 * params_.n * groups;
redBuffer.Resize({1, buffer_sizes});
params_.redBuffer = dev_ctx.template Alloc<float>(&redBuffer);
#ifdef PADDLE_WITH_HIP
hipMemset(params_.redBuffer, 0, buffer_sizes * sizeof(float));
#else
hipMemset(params_.redBuffer, 0, buffer_sizes * sizeof(float));
#endif
groupNormNHWCSum<T> nhwc_sum;
nhwc_sum(&params_, stream);
groupNormNHWCScale<T> nhwc_scale;
nhwc_scale(params_, stream);
#ifdef PADDLE_WITH_HIP
phi::backends::gpu::GpuMemcpyAsync(mean_data,
params_.redBuffer,
params_.n * groups * sizeof(float),
hipMemcpyDeviceToHost,
stream);
#else
phi::backends::gpu::GpuMemcpyAsync(mean_data,
params_.redBuffer,
params_.n * groups * sizeof(float),
hipMemcpyDeviceToHost,
stream);
#endif
}
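// NHWC mean/variance pass for the general (non-fused) path: each block reduces
// one channel of one (batch, group) pair and atomically accumulates its share
// of the group mean and mean of squares.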
template <typename T, typename AccT>
__global__ void GroupNormForwardGetMeanAndVar(const T* x,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT* mean,
AccT* var) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int number = min(group_size, static_cast<int>(C - gid * group_size));
int ccid = gid * group_size + cid;
if (ccid >= C) return;
AccT x_mean = static_cast<AccT>(0);
AccT x_var = static_cast<AccT>(0);
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid = imid / W;
int wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
x_mean += val;
x_var += val * val;
}
x_mean /= number * imsize;
x_var /= number * imsize;
CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean);
CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var);
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormForward(const T* x,
const AccT* mean,
const AccT* var,
const T* scale,
const T* bias,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT epsilon,
T* y,
AccT* real_var,
const DataLayout data_layout) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int ccid = gid * group_size + cid;
if (ccid >= C) return;
auto ng = bid * groups + gid;
AccT x_mean = mean[ng];
AccT x_var = var[ng];
x_var = x_var - x_mean * x_mean;
AccT var_inv = rsqrt(x_var + epsilon);
if (cid == 0 && threadIdx.x == 0) {
real_var[ng] = x_var;
}
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid, wid;
int index = (bid * C + ccid) * imsize + imid;
if (data_layout == DataLayout::kNCHW) {
val = static_cast<AccT>(x[index]);
} else {
hid = imid / W;
wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
}
val = (val - x_mean) * var_inv;
if (flags & kHasScale) {
val *= static_cast<AccT>(scale[ccid]);
}
if (flags & kHasBias) {
val += static_cast<AccT>(bias[ccid]);
}
if (data_layout == DataLayout::kNCHW) {
y[index] = static_cast<T>(val);
} else {
y[(bid * H + hid) * W * C + wid * C + ccid] = static_cast<T>(val);
}
}
}
template <typename T, typename AccT>
void GroupNormDirectCUDAFunctor<T, AccT>::operator()(
gpuStream_t stream,
const T* input,
std::vector<int> input_shape,
const T* bias,
const T* scale,
AccT* temp_variance,
int groups,
float eps,
T* output,
AccT* mean,
AccT* variance,
const DataLayout data_layout) {
const auto input_ddim = phi::make_ddim(input_shape);
const int C =
(data_layout == DataLayout::kNCHW ? input_ddim[1]
: input_ddim[input_ddim.size() - 1]);
const int group_size = C / groups;
const int W =
(data_layout == DataLayout::kNCHW ? input_ddim[input_ddim.size() - 1]
: input_ddim[input_ddim.size() - 2]);
int image_size = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < input_ddim.size(); ++i) {
image_size *= input_ddim[i];
}
} else {
for (int i = 1; i < input_ddim.size() - 1; ++i) {
image_size *= input_ddim[i];
}
}
int block_size = ::min(1024, image_size);
dim3 grid(group_size, groups, input_ddim[0]);
dim3 threads(block_size, 1, 1);
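// NCHW input is reduced with the vectorized per-(batch, group) kernels below;
// NHWC zeroes the accumulators and reuses the per-channel kernel above.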
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * image_size; // group element size
const int max_num_threads = 1024;
int max_block_size = ::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = ::max(block_size_nchw, phi::kps::details::kWarpSize);
dim3 grids(input_ddim[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
hipLaunchKernelGGL(( phi::ScalarGetMeanAndVarNCHW<T, AccT>)
, dim3(grids), dim3(blocks), 0, stream, input, mean, temp_variance, size);
} else {
hipLaunchKernelGGL(( phi::VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>)
, dim3(grids), dim3(blocks), 0, stream, input, mean, temp_variance, size);
}
} else {
#ifdef PADDLE_WITH_HIP
hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#else
hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#endif
hipLaunchKernelGGL(( phi::GroupNormForwardGetMeanAndVar<T, AccT>)
, dim3(grid), dim3(threads), 0, stream, input,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
mean,
temp_variance);
}
hipLaunchKernelGGL(( GroupNormForward<T, AccT, 3>)
, dim3(grid), dim3(threads), 0, stream, input,
mean,
temp_variance,
scale,
bias,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
static_cast<AccT>(eps),
output,
variance,
data_layout);
}
template class GroupNormDirectCUDAFunctor<float, float>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class GroupNormDirectCUDAFunctor<half, float>;
#endif
template <typename T, typename Context>
void GroupNormGeneralCaseKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto x_dims = x.dims();
const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
const int group_size = C / groups;
const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
: x_dims[x_dims.size() - 2]);
dev_ctx.template Alloc<T>(y);
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
// temp_var is used to calculate the mean^2
DenseTensor temp_var;
temp_var.Resize(var->dims());
dev_ctx.template Alloc<AccT>(&temp_var);
phi::funcs::SetConstant<GPUContext, T> set_zero;
phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
auto* x_data = x.data<T>();
auto* y_data = y->data<T>();
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
auto* temp_var_data = temp_var.data<AccT>();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
int imsize = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < x_dims.size(); ++i) {
imsize *= x_dims[i];
}
} else {
for (int i = 1; i < x_dims.size() - 1; ++i) {
imsize *= x_dims[i];
}
}
int block_size = ::min(1024, imsize);
dim3 grid(group_size, groups, x_dims[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * imsize;
const int max_num_threads = 1024;
int max_block_size = ::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = ::max(block_size_nchw, kps::details::kWarpSize);
dim3 grids(x_dims[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
hipLaunchKernelGGL(( ScalarGetMeanAndVarNCHW<T, AccT>), dim3(grids), dim3(blocks), 0, dev_ctx.stream(),
x_data, mean_data, temp_var_data, size);
} else {
hipLaunchKernelGGL(( VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>)
, dim3(grids), dim3(blocks), 0, dev_ctx.stream(),
x_data, mean_data, temp_var_data, size);
}
} else {
set_zero_AccT(dev_ctx, mean, static_cast<AccT>(0));
set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
hipLaunchKernelGGL(( GroupNormForwardGetMeanAndVar<T, AccT>)
, dim3(grid), dim3(threads), 0, dev_ctx.stream(), x_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
mean_data,
temp_var_data);
}
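// Encode which optional affine parameters exist and let UNROLL_ALL_CASES pick
// the matching GroupNormForward specialization.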
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
UNROLL_ALL_CASES(flags,
GroupNormForward,
x_data,
mean_data,
temp_var_data,
scale_data,
bias_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
static_cast<AccT>(epsilon),
y_data,
var_data,
data_layout);
}
template <typename T, typename Context>
void GroupNormKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using std::is_same;
if (is_same<T, phi::dtype::float16>::value && data_layout_str == "NHWC") {
GroupNormNHWCKernel<phi::dtype::float16, Context>(dev_ctx,
x,
scale,
bias,
epsilon,
groups,
data_layout_str,
y,
mean,
var);
return;
}
#ifdef PADDLE_CUDA_BF16
if (is_same<T, phi::dtype::bfloat16>::value && data_layout_str == "NHWC") {
GroupNormNHWCKernel<phi::dtype::bfloat16, Context>(dev_ctx,
x,
scale,
bias,
epsilon,
groups,
data_layout_str,
y,
mean,
var);
return;
}
#endif
GroupNormGeneralCaseKernel<T, Context>(
dev_ctx, x, scale, bias, epsilon, groups, data_layout_str, y, mean, var);
}
} // namespace phi
PD_REGISTER_KERNEL(group_norm,
GPU,
ALL_LAYOUT,
phi::GroupNormKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::BFLOAT16 ||
kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
}
}
| b20e25faebecad3c178c13a93c31f8b471404e22.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/group_norm_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/group_norm_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
static inline __device__ __host__ float sigmoid(float x) {
return 1.F / (1.F + expf(-x));
}
#ifdef PADDLE_CUDA_BF16
__host__ __device__ inline float2 bfloat1622float2(const __nv_bfloat162 a) {
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
return __bfloat1622float2(a);
#else
float hi_float;
float lo_float;
lo_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).x);
hi_float = __internal_bfloat162float(((__nv_bfloat162_raw)a).y);
return make_float2(lo_float, hi_float);
#endif
}
__host__ __device__ inline __nv_bfloat162 float22bfloat162_rn(const float2 a) {
__nv_bfloat162 val;
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 800))
val = __float22bfloat162_rn(a);
#else
val.x = __float2bfloat16_rn(a.x);
val.y = __float2bfloat16_rn(a.y);
#endif
return val;
}
#endif
template <typename T>
__host__ __device__ inline float __2float(const T a) {
return static_cast<float>(a);
}
template <>
__host__ __device__ inline float __2float<__half>(const __half a) {
return __half2float(a);
}
template <typename T>
__host__ __device__ inline T __2dst(const float a) {
return static_cast<T>(a);
}
template <>
__host__ __device__ inline __half __2dst<__half>(const float a) {
return __float2half(a);
}
struct GroupSums {
// Is it the 1st element of the group?
int32_t flag;
// The sum.
float sum;
// The sum of squares.
float sumSq;
};
struct GroupSumsOp {
inline __device__ GroupSums operator()(GroupSums const& a,
GroupSums const& b) {
GroupSums dst;
dst.sum = b.flag ? b.sum : (a.sum + b.sum);
dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq);
dst.flag = a.flag + b.flag;
return dst;
}
};
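// Find the largest divisor of n that stays below maxAllowedDivisor (-1 if none);
// used below to split the H*W extent into a whole number of per-block chunks.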
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
int32_t maxDivisor = -1;
for (int32_t i = 1; i <= std::sqrt(n); i++) {
if (n % i == 0) {
int32_t divisor1 = n / i;
int32_t divisor2 = i;
if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
maxDivisor = divisor1;
}
if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
maxDivisor = divisor2;
}
}
}
return maxDivisor;
}
template <typename T, int THREADS_PER_CHANNEL>
inline __device__ void UpdateSum(const T* srcX, float* sum, float* sumSq) {
float src_data = phi::__2float<T>(*srcX);
*sum += src_data;
*sumSq += src_data * src_data;
}
template <>
inline __device__ void UpdateSum<__half, 2>(const __half* srcX,
float* sum,
float* sumSq) {
__half2 h2 = *reinterpret_cast<__half2 const*>(srcX);
float2 f2 = __half22float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
template <>
inline __device__ void UpdateSum<phi::dtype::float16, 2>(
const phi::dtype::float16* srcX, float* sum, float* sumSq) {
__half2 h2 = *reinterpret_cast<__half2 const*>(srcX);
float2 f2 = __half22float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
#ifdef PADDLE_CUDA_BF16
template <>
inline __device__ void UpdateSum<phi::dtype::bfloat16, 2>(
const phi::dtype::bfloat16* srcX, float* sum, float* sumSq) {
__nv_bfloat162 h2 = *reinterpret_cast<__nv_bfloat162 const*>(srcX);
float2 f2 = phi::bfloat1622float2(h2);
*sum += f2.x + f2.y;
*sumSq += f2.x * f2.x + f2.y * f2.y;
}
#endif
template <typename T, int THREADS_PER_BLOCK>
__global__ void groupNormNHWCSumSingerChannelKernel(
const GroupNormNHWCParams<T> params) {
// The instance in the batch.
__shared__ float2 smem[THREADS_PER_BLOCK];
int32_t ni = blockIdx.z;
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x;
if (ci >= params.c) {
return;
}
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
float src_data = *reinterpret_cast<float const*>(&params.srcX[offset]);
UpdateSum<T, 1>(&params.srcX[offset], &sum, &sumSq);
}
smem[threadIdx.x] = make_float2(sum, sumSq);
__syncthreads();
float2 sums = smem[threadIdx.x];
atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + ci],
sums.x * params.invHWC);
atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + ci], sums.y);
}
template <typename T, int THREADS_PER_BLOCK, int THREADS_PER_CHANNEL>
__global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams<T> params) {
// The object in charge of doing the sums for the different blocks.
typedef cub::BlockScan<GroupSums, THREADS_PER_BLOCK> BlockScan;
// Allocate shared memory for BlockScan.
__shared__ typename BlockScan::TempStorage tempStorage;
// Allocate shared memory for the groups. We could reduce the amount of shared
// memory reserved.
__shared__ float2 smem[THREADS_PER_BLOCK];
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci =
blockIdx.x * params.cPerBlock + threadIdx.x * THREADS_PER_CHANNEL;
if (ci >= params.c || threadIdx.x * THREADS_PER_CHANNEL >= params.cPerBlock) {
return;
}
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
float src_data = *reinterpret_cast<float const*>(&params.srcX[offset]);
UpdateSum<T, THREADS_PER_CHANNEL>(&params.srcX[offset], &sum, &sumSq);
}
// The group that thread works on and the channel in the group (modulus).
int32_t gi =
ci / params.cPerGroup - blockIdx.x * params.cPerBlock / params.cPerGroup;
int32_t cj = ci % params.cPerGroup;
int flag = (cj == 0 || threadIdx.x == 0) ? 1 : 0;
GroupSums inp{flag, sum, sumSq};
GroupSums out;
BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());
if (cj == params.cPerGroup - THREADS_PER_CHANNEL ||
threadIdx.x * THREADS_PER_CHANNEL ==
params.cPerBlock - THREADS_PER_CHANNEL) {
smem[gi] = make_float2(out.sum, out.sumSq);
}
__syncthreads();
int32_t gj = ci / params.cPerGroup;
if (cj == params.cPerGroup - THREADS_PER_CHANNEL ||
threadIdx.x * THREADS_PER_CHANNEL ==
params.cPerBlock - THREADS_PER_CHANNEL) {
float2 sums = smem[gi];
atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj],
sums.x * params.invHWC);
atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
}
template <typename T>
void groupNormNHWCSum<T>::operator()(GroupNormNHWCParams<T>* params,
gpuStream_t stream) {
dim3 grid;
grid.x = divUp(params->c, params->cPerBlock);
grid.y = divUp(params->hw, params->hwPerBlock);
grid.z = params->n;
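// Pick the two-channels-per-thread kernels when each group has an even channel
// count; otherwise use one channel per thread, or the single-channel kernel
// when cPerGroup == 1.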
if (params->cPerGroup % 2 == 0) {
switch (params->cPerBlock) {
case 512:
case 480:
groupNormNHWCSumKernel<T, 256, 2><<<grid, 256, 0, stream>>>(*params);
break;
case 320:
groupNormNHWCSumKernel<T, 160, 2><<<grid, 160, 0, stream>>>(*params);
break;
case 256:
groupNormNHWCSumKernel<T, 128, 2><<<grid, 128, 0, stream>>>(*params);
break;
case 128:
groupNormNHWCSumKernel<T, 64, 2><<<grid, 64, 0, stream>>>(*params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
groupNormNHWCSumKernel<T, 64, 2><<<grid, 64, 0, stream>>>(*params);
}
} else {
if (params->cPerGroup != 1) {
switch (params->cPerBlock) {
case 512:
groupNormNHWCSumKernel<T, 512, 1><<<grid, 512, 0, stream>>>(*params);
break;
case 480:
groupNormNHWCSumKernel<T, 480, 1><<<grid, 480, 0, stream>>>(*params);
break;
case 320:
groupNormNHWCSumKernel<T, 320, 1><<<grid, 320, 0, stream>>>(*params);
break;
case 256:
groupNormNHWCSumKernel<T, 256, 1><<<grid, 256, 0, stream>>>(*params);
break;
case 128:
groupNormNHWCSumKernel<T, 128, 1><<<grid, 128, 0, stream>>>(*params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
groupNormNHWCSumKernel<T, 128, 1><<<grid, 128, 0, stream>>>(*params);
}
} else {
switch (params->cPerBlock) {
case 512:
groupNormNHWCSumSingerChannelKernel<T, 512>
<<<grid, 512, 0, stream>>>(*params);
break;
case 480:
groupNormNHWCSumSingerChannelKernel<T, 480>
<<<grid, 480, 0, stream>>>(*params);
break;
case 320:
groupNormNHWCSumSingerChannelKernel<T, 320>
<<<grid, 320, 0, stream>>>(*params);
break;
case 256:
groupNormNHWCSumSingerChannelKernel<T, 256>
<<<grid, 256, 0, stream>>>(*params);
break;
case 128:
groupNormNHWCSumSingerChannelKernel<T, 128>
<<<grid, 128, 0, stream>>>(*params);
break;
default:
grid.x = divUp(params->c, 128);
params->cPerBlock = 128;
groupNormNHWCSumSingerChannelKernel<T, 128>
<<<grid, 128, 0, stream>>>(*params);
}
}
}
}
template class groupNormNHWCSum<half>;
template <typename T, int THREADS_PER_CHANNEL>
inline __device__ void GroupNormCompute(int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<T>& params,
float mean,
float invStdDev) {
float gamma =
phi::__2float<T>(*(reinterpret_cast<T const*>(params.gamma) + ci));
float beta =
phi::__2float<T>(*(reinterpret_cast<T const*>(params.beta) + ci));
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
const float src_data = phi::__2float<T>(params.srcX[offset]);
// Normalize the channels.
float dst_data = (src_data - mean) * invStdDev;
// Scale by gamma and add beta.
dst_data = gamma * dst_data + beta;
// Apply Silu if needed.
if (params.withSilu) {
dst_data = dst_data * sigmoid(dst_data);
}
// Store the scaled values.
*reinterpret_cast<T*>(&params.dst[offset]) = phi::__2dst<T>(dst_data);
}
}
template <>
inline __device__ void GroupNormCompute<phi::dtype::float16, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<phi::dtype::float16>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2 = *reinterpret_cast<__half2 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__half2*>(&params.dst[offset]) = __float22half2_rn(f2);
}
}
template <>
inline __device__ void GroupNormCompute<__half, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<__half>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<__half2 const*>(
reinterpret_cast<half const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2 = *reinterpret_cast<__half2 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__half2*>(&params.dst[offset]) = __float22half2_rn(f2);
}
}
#ifdef PADDLE_CUDA_BF16
template <>
inline __device__ void GroupNormCompute<phi::dtype::bfloat16, 2>(
int32_t hwBegin,
int32_t hwEnd,
int32_t ci,
const GroupNormNHWCParams<phi::dtype::bfloat16>& params,
float mean,
float invStdDev) {
float2 gammaF2, betaF2;
gammaF2 = phi::bfloat1622float2(*reinterpret_cast<__nv_bfloat162 const*>(
reinterpret_cast<__nv_bfloat16 const*>(params.gamma) + ci));
betaF2 = phi::bfloat1622float2(*reinterpret_cast<__nv_bfloat162 const*>(
reinterpret_cast<__nv_bfloat16 const*>(params.beta) + ci));
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)blockIdx.z * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__nv_bfloat162 h2 =
*reinterpret_cast<__nv_bfloat162 const*>(&params.srcX[offset]);
// Extract the two half values.
float2 f2 = phi::bfloat1622float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
*reinterpret_cast<__nv_bfloat162*>(&params.dst[offset]) =
phi::float22bfloat162_rn(f2);
}
}
#endif
template <typename T, int THREADS_PER_CHANNEL>
__global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams<T> params) {
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci =
blockIdx.x * params.cPerBlock + threadIdx.x * THREADS_PER_CHANNEL;
// The group that thread works on and the channel in the group (modulus).
int32_t gi = ci / params.cPerGroup;
if (ci >= params.c || gi >= params.groups) {
return;
}
// Load the sum and sum of squares for the group.
float mean = params.redBuffer[(2 * ni + 0) * params.groups + gi];
float sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
// Compute the variance.
float var = sumSq * params.invHWC - (mean * mean);
if (params.var_data != nullptr) {
params.var_data[ni * params.groups + gi] = var;
}
// Compute the inverse of the stddev.
float invStdDev = rsqrtf(var + params.eps);
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
GroupNormCompute<T, THREADS_PER_CHANNEL>(
hwBegin, hwEnd, ci, params, mean, invStdDev);
}
template <typename T>
void groupNormNHWCScale<T>::operator()(const GroupNormNHWCParams<T>& params,
gpuStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = divUp(params.c, params.cPerBlock);
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
if (params.cPerGroup % 2 == 0) {
switch (params.cPerBlock) {
case 512:
case 480:
groupNormNHWCScaleKernel<T, 2><<<grid, 256, 0, stream>>>(params);
break;
case 320:
groupNormNHWCScaleKernel<T, 2><<<grid, 160, 0, stream>>>(params);
break;
case 256:
groupNormNHWCScaleKernel<T, 2><<<grid, 128, 0, stream>>>(params);
break;
case 128:
groupNormNHWCScaleKernel<T, 2><<<grid, 64, 0, stream>>>(params);
break;
default:
grid.x = divUp(params.c, 128);
groupNormNHWCScaleKernel<T, 2><<<grid, 64, 0, stream>>>(params);
}
} else {
switch (params.cPerBlock) {
case 512:
groupNormNHWCScaleKernel<T, 1><<<grid, 512, 0, stream>>>(params);
break;
case 480:
groupNormNHWCScaleKernel<T, 1><<<grid, 480, 0, stream>>>(params);
break;
case 320:
groupNormNHWCScaleKernel<T, 1><<<grid, 320, 0, stream>>>(params);
break;
case 256:
groupNormNHWCScaleKernel<T, 1><<<grid, 256, 0, stream>>>(params);
break;
case 128:
groupNormNHWCScaleKernel<T, 1><<<grid, 128, 0, stream>>>(params);
break;
default:
grid.x = divUp(params.c, 128);
groupNormNHWCScaleKernel<T, 1><<<grid, 128, 0, stream>>>(params);
}
}
}
template class groupNormNHWCScale<half>;
template <typename T, typename Context>
void GroupNormNHWCKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
GroupNormNHWCParams<T> params_;
params_.withSilu = false;
const auto x_dims = x.dims();
dev_ctx.template Alloc<T>(y);
const T* x_data = x.data<T>();
T* y_data = y->data<T>();
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
params_.n = x_dims[0];
params_.c = x_dims[3];
params_.h = x_dims[1];
params_.w = x_dims[2];
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
params_.var_data = var_data;
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (params_.c) {
case 2048:
case 1024:
cPerBlock = 512;
break;
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
params_.groups = groups;
params_.cPerGroup = params_.c / params_.groups;
if (cPerBlock % params_.cPerGroup != 0) {
cPerBlock = params_.cPerGroup;
}
params_.srcX = reinterpret_cast<const T*>(x_data);
params_.dst = reinterpret_cast<T*>(y_data);
params_.gamma = scale_data;
params_.beta = bias_data;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.eps = epsilon;
auto stream = dev_ctx.stream();
DenseTensor redBuffer;
int buffer_sizes = 2 * params_.n * groups;
redBuffer.Resize({1, buffer_sizes});
params_.redBuffer = dev_ctx.template Alloc<float>(&redBuffer);
#ifdef PADDLE_WITH_HIP
hipMemset(params_.redBuffer, 0, buffer_sizes * sizeof(float));
#else
cudaMemset(params_.redBuffer, 0, buffer_sizes * sizeof(float));
#endif
groupNormNHWCSum<T> nhwc_sum;
nhwc_sum(&params_, stream);
groupNormNHWCScale<T> nhwc_scale;
nhwc_scale(params_, stream);
#ifdef PADDLE_WITH_HIP
phi::backends::gpu::GpuMemcpyAsync(mean_data,
params_.redBuffer,
params_.n * groups * sizeof(float),
hipMemcpyDeviceToHost,
stream);
#else
phi::backends::gpu::GpuMemcpyAsync(mean_data,
params_.redBuffer,
params_.n * groups * sizeof(float),
cudaMemcpyDeviceToHost,
stream);
#endif
}
template <typename T, typename AccT>
__global__ void GroupNormForwardGetMeanAndVar(const T* x,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT* mean,
AccT* var) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int number = min(group_size, static_cast<int>(C - gid * group_size));
int ccid = gid * group_size + cid;
if (ccid >= C) return;
AccT x_mean = static_cast<AccT>(0);
AccT x_var = static_cast<AccT>(0);
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid = imid / W;
int wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
x_mean += val;
x_var += val * val;
}
x_mean /= number * imsize;
x_var /= number * imsize;
CudaAtomicAddWithWarp(&mean[bid * groups + gid], x_mean);
CudaAtomicAddWithWarp(&var[bid * groups + gid], x_var);
}
template <typename T, typename AccT, int flags>
__global__ void GroupNormForward(const T* x,
const AccT* mean,
const AccT* var,
const T* scale,
const T* bias,
int N,
int C,
int W,
int imsize,
int groups,
int group_size,
AccT epsilon,
T* y,
AccT* real_var,
const DataLayout data_layout) {
int gid = blockIdx.y;
int cid = blockIdx.x;
int bid = blockIdx.z;
int H = imsize / W;
int ccid = gid * group_size + cid;
if (ccid >= C) return;
auto ng = bid * groups + gid;
AccT x_mean = mean[ng];
AccT x_var = var[ng];
x_var = x_var - x_mean * x_mean;
AccT var_inv = rsqrt(x_var + epsilon);
if (cid == 0 && threadIdx.x == 0) {
real_var[ng] = x_var;
}
for (int imid = threadIdx.x; imid < imsize; imid += blockDim.x) {
AccT val;
int hid, wid;
int index = (bid * C + ccid) * imsize + imid;
if (data_layout == DataLayout::kNCHW) {
val = static_cast<AccT>(x[index]);
} else {
hid = imid / W;
wid = imid % W;
val = static_cast<AccT>(x[(bid * H + hid) * W * C + wid * C + ccid]);
}
val = (val - x_mean) * var_inv;
if (flags & kHasScale) {
val *= static_cast<AccT>(scale[ccid]);
}
if (flags & kHasBias) {
val += static_cast<AccT>(bias[ccid]);
}
if (data_layout == DataLayout::kNCHW) {
y[index] = static_cast<T>(val);
} else {
y[(bid * H + hid) * W * C + wid * C + ccid] = static_cast<T>(val);
}
}
}
template <typename T, typename AccT>
void GroupNormDirectCUDAFunctor<T, AccT>::operator()(
gpuStream_t stream,
const T* input,
std::vector<int> input_shape,
const T* bias,
const T* scale,
AccT* temp_variance,
int groups,
float eps,
T* output,
AccT* mean,
AccT* variance,
const DataLayout data_layout) {
const auto input_ddim = phi::make_ddim(input_shape);
const int C =
(data_layout == DataLayout::kNCHW ? input_ddim[1]
: input_ddim[input_ddim.size() - 1]);
const int group_size = C / groups;
const int W =
(data_layout == DataLayout::kNCHW ? input_ddim[input_ddim.size() - 1]
: input_ddim[input_ddim.size() - 2]);
int image_size = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < input_ddim.size(); ++i) {
image_size *= input_ddim[i];
}
} else {
for (int i = 1; i < input_ddim.size() - 1; ++i) {
image_size *= input_ddim[i];
}
}
int block_size = std::min(1024, image_size);
dim3 grid(group_size, groups, input_ddim[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * image_size; // group element size
const int max_num_threads = 1024;
int max_block_size = std::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = std::max(block_size_nchw, phi::kps::details::kWarpSize);
dim3 grids(input_ddim[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
phi::ScalarGetMeanAndVarNCHW<T, AccT>
<<<grids, blocks, 0, stream>>>(input, mean, temp_variance, size);
} else {
phi::VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>
<<<grids, blocks, 0, stream>>>(input, mean, temp_variance, size);
}
} else {
#ifdef PADDLE_WITH_HIP
hipMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
hipMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#else
cudaMemset(mean, 0, sizeof(AccT) * input_ddim[0] * groups);
cudaMemset(temp_variance, 0, sizeof(AccT) * input_ddim[0] * groups);
#endif
phi::GroupNormForwardGetMeanAndVar<T, AccT>
<<<grid, threads, 0, stream>>>(input,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
mean,
temp_variance);
}
GroupNormForward<T, AccT, 3>
<<<grid, threads, 0, stream>>>(input,
mean,
temp_variance,
scale,
bias,
input_ddim[0],
C,
W,
image_size,
groups,
group_size,
static_cast<AccT>(eps),
output,
variance,
data_layout);
}
template class GroupNormDirectCUDAFunctor<float, float>;
#if defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
template class GroupNormDirectCUDAFunctor<half, float>;
#endif
template <typename T, typename Context>
void GroupNormGeneralCaseKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using AccT = typename phi::dtype::MPTypeTrait<T>::Type;
const DataLayout data_layout = phi::StringToDataLayout(data_layout_str);
const auto scale_ptr = scale.get_ptr();
const auto bias_ptr = bias.get_ptr();
const auto x_dims = x.dims();
const int C = (data_layout == DataLayout::kNCHW ? x_dims[1]
: x_dims[x_dims.size() - 1]);
const int group_size = C / groups;
const int W = (data_layout == DataLayout::kNCHW ? x_dims[x_dims.size() - 1]
: x_dims[x_dims.size() - 2]);
dev_ctx.template Alloc<T>(y);
dev_ctx.template Alloc<AccT>(mean);
dev_ctx.template Alloc<AccT>(var);
// temp_var is used to calculate the mean^2
DenseTensor temp_var;
temp_var.Resize(var->dims());
dev_ctx.template Alloc<AccT>(&temp_var);
phi::funcs::SetConstant<GPUContext, T> set_zero;
phi::funcs::SetConstant<GPUContext, AccT> set_zero_AccT;
auto* x_data = x.data<T>();
auto* y_data = y->data<T>();
auto* mean_data = mean->data<AccT>();
auto* var_data = var->data<AccT>();
auto* temp_var_data = temp_var.data<AccT>();
const T* scale_data = nullptr;
if (scale_ptr) scale_data = scale_ptr->data<T>();
const T* bias_data = nullptr;
if (bias_ptr) bias_data = bias_ptr->data<T>();
int imsize = 1;
if (data_layout == DataLayout::kNCHW) {
for (int i = 2; i < x_dims.size(); ++i) {
imsize *= x_dims[i];
}
} else {
for (int i = 1; i < x_dims.size() - 1; ++i) {
imsize *= x_dims[i];
}
}
int block_size = std::min(1024, imsize);
dim3 grid(group_size, groups, x_dims[0]);
dim3 threads(block_size, 1, 1);
if (data_layout == DataLayout::kNCHW) {
constexpr int vec_size = sizeof(float4) / sizeof(T);
int size = group_size * imsize;
const int max_num_threads = 1024;
int max_block_size = std::min(size / vec_size, max_num_threads);
int block_size_nchw = 1;
while (block_size_nchw < max_block_size) {
block_size_nchw *= 2;
}
block_size_nchw = std::max(block_size_nchw, kps::details::kWarpSize);
dim3 grids(x_dims[0] * groups);
dim3 blocks(block_size_nchw);
if (size < vec_size * block_size_nchw) {
ScalarGetMeanAndVarNCHW<T, AccT><<<grids, blocks, 0, dev_ctx.stream()>>>(
x_data, mean_data, temp_var_data, size);
} else {
VectorizedGetMeanAndVarNCHW<T, AccT, vec_size>
<<<grids, blocks, 0, dev_ctx.stream()>>>(
x_data, mean_data, temp_var_data, size);
}
} else {
set_zero_AccT(dev_ctx, mean, static_cast<AccT>(0));
set_zero_AccT(dev_ctx, &temp_var, static_cast<AccT>(0));
GroupNormForwardGetMeanAndVar<T, AccT>
<<<grid, threads, 0, dev_ctx.stream()>>>(x_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
mean_data,
temp_var_data);
}
int flags =
(scale_data != nullptr) * kHasScale + (bias_data != nullptr) * kHasBias;
UNROLL_ALL_CASES(flags,
GroupNormForward,
x_data,
mean_data,
temp_var_data,
scale_data,
bias_data,
x_dims[0],
C,
W,
imsize,
groups,
group_size,
static_cast<AccT>(epsilon),
y_data,
var_data,
data_layout);
}
template <typename T, typename Context>
void GroupNormKernel(const Context& dev_ctx,
const DenseTensor& x,
const paddle::optional<DenseTensor>& scale,
const paddle::optional<DenseTensor>& bias,
float epsilon,
int groups,
const std::string& data_layout_str,
DenseTensor* y,
DenseTensor* mean,
DenseTensor* var) {
using std::is_same;
if (is_same<T, phi::dtype::float16>::value && data_layout_str == "NHWC") {
GroupNormNHWCKernel<phi::dtype::float16, Context>(dev_ctx,
x,
scale,
bias,
epsilon,
groups,
data_layout_str,
y,
mean,
var);
return;
}
#ifdef PADDLE_CUDA_BF16
if (is_same<T, phi::dtype::bfloat16>::value && data_layout_str == "NHWC") {
GroupNormNHWCKernel<phi::dtype::bfloat16, Context>(dev_ctx,
x,
scale,
bias,
epsilon,
groups,
data_layout_str,
y,
mean,
var);
return;
}
#endif
GroupNormGeneralCaseKernel<T, Context>(
dev_ctx, x, scale, bias, epsilon, groups, data_layout_str, y, mean, var);
}
} // namespace phi
PD_REGISTER_KERNEL(group_norm,
GPU,
ALL_LAYOUT,
phi::GroupNormKernel,
float,
double,
phi::dtype::bfloat16,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::BFLOAT16 ||
kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32);
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32);
}
}
|
9f753b6b67e77094a37ad6b23c6c63ff299ff9a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Tommi M. Tykkälä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <image2/Image2.h>
//#include <types.h>
#include "hostUtils.h"
#include <cwchar>
/*
__global__ void undistortHdrRGBKernel( unsigned char *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
int widthRGB = width*3;
float K[9];
float iK[9];
float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi*3+ydi*widthRGB;
unsigned short grayVal =
float v0 = srcPtr[offset2]; float v1 = srcPtr[offset2+3];
float v2 = srcPtr[offset2+widthRGB]; float v3 = srcPtr[offset2+widthRGB+3];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = ((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3)/255.0f;
} else {
dstPtr[offset] = 0.0f;
}
}*/
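/* Undistorts a single-channel HDR image: each thread maps its undistorted
   pixel through the inverse intrinsics iK, applies the radial distortion
   model (kc[0], kc[1], kc[4]), projects back with K and bilinearly samples
   the distorted source. calibDataDev packs K, iK and kc back to back. */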
__global__ void undistortHdrKernel( float *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
float K[9];
float iK[9];
float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi+ydi*width;
float v0 = srcPtr[offset2]; float v1 = srcPtr[offset2+1];
float v2 = srcPtr[offset2+width]; float v3 = srcPtr[offset2+width+1];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = ((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3)/255.0f;
} else {
dstPtr[offset] = 0.0f;
}
}
__global__ void undistortHdrRGBKernel( unsigned char *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
float K[9];
float iK[9];
float kc[5];
int pitch = width*3;
int offsetR = 3*xi + yi*pitch;
int offsetG = offsetR+1;
int offsetB = offsetR+2;
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offsetR2 = 3*xdi+0 + ydi*pitch;
int offsetG2 = offsetR2+1;
int offsetB2 = offsetR2+2;
float fx = p[0] - xdi;
float fy = p[1] - ydi;
float a = (1-fx)*(1-fy);
float b = fx*(1-fy);
float c = (1-fx)*fy;
float d = fx*fy;
float v0 = float(srcPtr[offsetR2])/255.0f; float v1 = float(srcPtr[offsetR2+3])/255.0f;
float v2 = float(srcPtr[offsetR2+pitch])/255.0f; float v3 = float(srcPtr[offsetR2+pitch+3])/255.0f;
dstPtr[offsetR] = a*v0 + b*v1 + c*v2 + d*v3;
v0 = float(srcPtr[offsetG2])/255.0f; v1 = float(srcPtr[offsetG2+3])/255.0f;
v2 = float(srcPtr[offsetG2+pitch])/255.0f; v3 = float(srcPtr[offsetG2+pitch+3])/255.0f;
dstPtr[offsetG] = a*v0 + b*v1 + c*v2 + d*v3;
v0 = float(srcPtr[offsetB2])/255.0f; v1 = float(srcPtr[offsetB2+3])/255.0f;
v2 = float(srcPtr[offsetB2+pitch])/255.0f; v3 = float(srcPtr[offsetB2+pitch+3])/255.0f;
dstPtr[offsetB] = a*v0 + b*v1 + c*v2 + d*v3;
} else {
dstPtr[offsetR] = 0.0f;
dstPtr[offsetG] = 0.0f;
dstPtr[offsetB] = 0.0f;
}
}
/*
__global__ void undistortKernel( unsigned char *srcPtr, unsigned char *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
__shared__ float K[9];
__shared__ float iK[9];
__shared__ float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi+ydi*width;
float v0 = (float)srcPtr[offset2]; float v1 = (float)srcPtr[offset2+1];
float v2 = (float)srcPtr[offset2+width]; float v3 = (float)srcPtr[offset2+width+1];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = (unsigned char)((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3);
} else {
dstPtr[offset] = 0;
}
}
*/
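/* Host-side launchers: one thread per output pixel with a fixed 32x30 block,
   so image dimensions are assumed to be multiples of the block size. */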
extern "C" void undistortCuda(Image2 *distorted, Image2 *undistorted, float *calibDataDev)
{
if (distorted == 0 || distorted->devPtr == NULL || undistorted == 0 || undistorted->devPtr == NULL || calibDataDev == NULL) return;
float *srcPtr = (float*)distorted->devPtr;
float *dstPtr= (float*)undistorted->devPtr;
dim3 cudaBlockSize(32,30,1);
dim3 cudaGridSize(distorted->width/cudaBlockSize.x,distorted->height/cudaBlockSize.y,1);
hipLaunchKernelGGL(( undistortHdrKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,distorted->cudaStream, srcPtr,dstPtr,(unsigned int)distorted->width,(unsigned int)distorted->height,calibDataDev);
}
extern "C" void undistortRGBCuda(Image2 *distortedRGB, Image2 *undistortedRGB, float *calibDataDev)
{
if (distortedRGB == 0 || distortedRGB->devPtr == NULL || undistortedRGB == 0 || undistortedRGB->devPtr == NULL || calibDataDev == NULL) return;
unsigned char *srcPtr = (unsigned char*)distortedRGB->devPtr;
float *dstPtr= (float*)undistortedRGB->devPtr;
dim3 cudaBlockSize(32,30,1);
dim3 cudaGridSize(distortedRGB->width/cudaBlockSize.x,distortedRGB->height/cudaBlockSize.y,1);
hipLaunchKernelGGL(( undistortHdrRGBKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,distortedRGB->cudaStream, srcPtr,dstPtr,(unsigned int)distortedRGB->width,(unsigned int)distortedRGB->height,calibDataDev);
}
/*
extern "C" void undistortFromRGBCuda(Image2 *distortedRGB, Image2 *undistorted, float *calibDataDev)
{
if (distortedRGB == 0 || distortedRGB->devPtr == NULL || undistorted == 0 || undistorted->devPtr == NULL || calibDataDev == NULL) return;
unsigned char *srcPtr = (unsigned char*)distorted->devPtr;
float *dstPtr= (float*)undistorted->devPtr;
dim3 cudaBlockSize(32,32,1);
dim3 cudaGridSize(undistorted->width/cudaBlockSize.x,undistorted->height/cudaBlockSize.y,1);
hipLaunchKernelGGL(( undistortHdrRGBKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,distortedRGB->cudaStream, srcPtr,dstPtr,(unsigned int)undistorted->width,(unsigned int)undistorted->height,calibDataDev);
}
*/
| 9f753b6b67e77094a37ad6b23c6c63ff299ff9a0.cu | /*
Copyright 2016 Tommi M. Tykkälä
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <image2/Image2.h>
//#include <types.h>
#include "hostUtils.h"
#include <cwchar>
/*
__global__ void undistortHdrRGBKernel( unsigned char *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
int widthRGB = width*3;
float K[9];
float iK[9];
float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi*3+ydi*widthRGB;
unsigned short grayVal =
float v0 = srcPtr[offset2]; float v1 = srcPtr[offset2+3];
float v2 = srcPtr[offset2+widthRGB]; float v3 = srcPtr[offset2+widthRGB+3];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = ((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3)/255.0f;
} else {
dstPtr[offset] = 0.0f;
}
}*/
__global__ void undistortHdrKernel( float *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
float K[9];
float iK[9];
float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi+ydi*width;
float v0 = srcPtr[offset2]; float v1 = srcPtr[offset2+1];
float v2 = srcPtr[offset2+width]; float v3 = srcPtr[offset2+width+1];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = ((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3)/255.0f;
} else {
dstPtr[offset] = 0.0f;
}
}
__global__ void undistortHdrRGBKernel( unsigned char *srcPtr, float *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
float K[9];
float iK[9];
float kc[5];
int pitch = width*3;
int offsetR = 3*xi + yi*pitch;
int offsetG = offsetR+1;
int offsetB = offsetR+2;
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offsetR2 = 3*xdi+0 + ydi*pitch;
int offsetG2 = offsetR2+1;
int offsetB2 = offsetR2+2;
float fx = p[0] - xdi;
float fy = p[1] - ydi;
float a = (1-fx)*(1-fy);
float b = fx*(1-fy);
float c = (1-fx)*fy;
float d = fx*fy;
float v0 = float(srcPtr[offsetR2])/255.0f; float v1 = float(srcPtr[offsetR2+3])/255.0f;
float v2 = float(srcPtr[offsetR2+pitch])/255.0f; float v3 = float(srcPtr[offsetR2+pitch+3])/255.0f;
dstPtr[offsetR] = a*v0 + b*v1 + c*v2 + d*v3;
v0 = float(srcPtr[offsetG2])/255.0f; v1 = float(srcPtr[offsetG2+3])/255.0f;
v2 = float(srcPtr[offsetG2+pitch])/255.0f; v3 = float(srcPtr[offsetG2+pitch+3])/255.0f;
dstPtr[offsetG] = a*v0 + b*v1 + c*v2 + d*v3;
v0 = float(srcPtr[offsetB2])/255.0f; v1 = float(srcPtr[offsetB2+3])/255.0f;
v2 = float(srcPtr[offsetB2+pitch])/255.0f; v3 = float(srcPtr[offsetB2+pitch+3])/255.0f;
dstPtr[offsetB] = a*v0 + b*v1 + c*v2 + d*v3;
} else {
dstPtr[offsetR] = 0.0f;
dstPtr[offsetG] = 0.0f;
dstPtr[offsetB] = 0.0f;
}
}
/*
__global__ void undistortKernel( unsigned char *srcPtr, unsigned char *dstPtr, unsigned int width, unsigned int height, float *calibDataDev)
{
unsigned int xi = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int yi = blockIdx.y*blockDim.y+threadIdx.y;
int offset = xi + yi*width;
__shared__ float K[9];
__shared__ float iK[9];
__shared__ float kc[5];
for (int i = 0; i < 9; i++) {
K[i] = calibDataDev[i];
iK[i] = calibDataDev[i+9];
}
for (int i = 0; i < 5; i++) kc[i] = calibDataDev[i+9*2];
// convert to float coordinates
float pu[2]; pu[0] = (float)xi; pu[1] = (float)yi;
float pd[2];
pd[0] = iK[0]*pu[0] + iK[1]*pu[1] + iK[2];
pd[1] = iK[3]*pu[0] + iK[4]*pu[1] + iK[5];
// distort point
float r2 = (pd[0]*pd[0])+(pd[1]*pd[1]); float r4 = r2*r2; float r6 = r4 * r2;
float radialDist = 1 + kc[0]*r2 + kc[1]*r4 + kc[4]*r6;
pd[0] *= radialDist;
pd[1] *= radialDist;
// define sampling point in distorted image
float p[2];
p[0] = K[0]*pd[0] + K[1]*pd[1] + K[2];
p[1] = K[3]*pd[0] + K[4]*pd[1] + K[5];
// bi-linear interpolation
int xdi = (int)p[0];
int ydi = (int)p[1];
if (xdi >= 0 && ydi >= 0 && xdi < width-2 && ydi < height-2) {
int offset2 = xdi+ydi*width;
float v0 = (float)srcPtr[offset2]; float v1 = (float)srcPtr[offset2+1];
float v2 = (float)srcPtr[offset2+width]; float v3 = (float)srcPtr[offset2+width+1];
float fx = p[0] - xdi;
float fy = p[1] - ydi;
dstPtr[offset] = (unsigned char)((1-fx)*(1-fy)*v0 + fx*(1-fy)*v1 + (1-fx)*fy*v2 + fx*fy*v3);
} else {
dstPtr[offset] = 0;
}
}
*/
extern "C" void undistortCuda(Image2 *distorted, Image2 *undistorted, float *calibDataDev)
{
if (distorted == 0 || distorted->devPtr == NULL || undistorted == 0 || undistorted->devPtr == NULL || calibDataDev == NULL) return;
float *srcPtr = (float*)distorted->devPtr;
float *dstPtr= (float*)undistorted->devPtr;
dim3 cudaBlockSize(32,30,1);
dim3 cudaGridSize(distorted->width/cudaBlockSize.x,distorted->height/cudaBlockSize.y,1);
undistortHdrKernel<<<cudaGridSize,cudaBlockSize,0,distorted->cudaStream>>>(srcPtr,dstPtr,(unsigned int)distorted->width,(unsigned int)distorted->height,calibDataDev);
}
extern "C" void undistortRGBCuda(Image2 *distortedRGB, Image2 *undistortedRGB, float *calibDataDev)
{
if (distortedRGB == 0 || distortedRGB->devPtr == NULL || undistortedRGB == 0 || undistortedRGB->devPtr == NULL || calibDataDev == NULL) return;
unsigned char *srcPtr = (unsigned char*)distortedRGB->devPtr;
float *dstPtr= (float*)undistortedRGB->devPtr;
dim3 cudaBlockSize(32,30,1);
dim3 cudaGridSize(distortedRGB->width/cudaBlockSize.x,distortedRGB->height/cudaBlockSize.y,1);
undistortHdrRGBKernel<<<cudaGridSize,cudaBlockSize,0,distortedRGB->cudaStream>>>(srcPtr,dstPtr,(unsigned int)distortedRGB->width,(unsigned int)distortedRGB->height,calibDataDev);
}
/*
extern "C" void undistortFromRGBCuda(Image2 *distortedRGB, Image2 *undistorted, float *calibDataDev)
{
if (distortedRGB == 0 || distortedRGB->devPtr == NULL || undistorted == 0 || undistorted->devPtr == NULL || calibDataDev == NULL) return;
unsigned char *srcPtr = (unsigned char*)distorted->devPtr;
float *dstPtr= (float*)undistorted->devPtr;
dim3 cudaBlockSize(32,32,1);
dim3 cudaGridSize(undistorted->width/cudaBlockSize.x,undistorted->height/cudaBlockSize.y,1);
undistortHdrRGBKernel<<<cudaGridSize,cudaBlockSize,0,distortedRGB->cudaStream>>>(srcPtr,dstPtr,(unsigned int)undistorted->width,(unsigned int)undistorted->height,calibDataDev);
}
*/
|
05f700d66c5a2843d8fc9dc01ec46321faa20d88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix Inversion
* Group F: M. Lechner, P. Knöbel, J. Lövhall
*
* reduction to check if matrix is ID
*/
#include "includes.h"
/* Sets the diagonal to 1 */
__global__
void identity(int n, float *mat)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
{
if(j==i)
{
mat[i*n + j] = 1;
}
// printf("%d, %d\n",i,j);
}
}
/* Sets all values to 0.0f (because of float data-type memset does not work) */
__global__
void setzero(int n, float *mat)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
{
mat[i*n + j] = 0;
}
}
/* Accumulate function checks if current value and position matches to the definition
of the identity matrix, i.e. 1s in the diagonal, 0s elsewhere.
Returns 0 if value and position matches,
returns 1 if there is a conflict, i.e. matrix is not identity matrix */
__device__
void accumulate(float mat, int i, int n, int &ret)
{
int x=i/n;
int y = i%n;
if(y==x)
{
if(mat>1.1 || mat<0.9)
ret = 1;
else ret = 0;
}
else
{
if(mat>0.1 || mat < -0.1)
ret = 1;
else ret = 0;
}
}
/* Allocates identity matrix of dimension n by n on the device */
float* get_dev_identity_matrix(int n)
{
int size = n*n;
float *d_mat;
if(hipMalloc((void **)&d_mat, size*sizeof(float)) != hipSuccess)
{
return NULL;
}
if(hipMemset(d_mat, 0, size*sizeof(float)) != hipSuccess)
{
return NULL;
}
/* Let 16 by 16 threads run in parallel per block */
dim3 threadsPerBlock(16, 16);
int dimx = n / threadsPerBlock.x;
int dimy = n / threadsPerBlock.y;
/* Is n not divisible by 16 -> increment n by 1 to process the remaining elements */
if( n > dimx * threadsPerBlock.x)
dimx++;
if( n > dimy * threadsPerBlock.y)
dimy++;
dim3 numBlocks(dimx, dimy);
// Setting all to 0 and the diagonal to 1 is done with 2 kernels, because of caching and memory access
// Set all entries to 0 first, cannot use hipMemset because of float (32 bit) data type
hipLaunchKernelGGL(( setzero), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, n, d_mat);
// Set the elements in the diagonal to 1
hipLaunchKernelGGL(( identity), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, n, d_mat);
return d_mat;
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
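// Block-wide reduction over the flattened n*n matrix: every thread folds its
// elements' identity-violation flags (via accumulate) into a private sum, the
// block then reduces that sum in shared memory (warp shuffles on sm_30+), and
// thread 0 writes one partial result per block to g_odata.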
template <unsigned int blockSize>
__global__ void
reduce6(float *g_idata, int *g_odata, int size, int n)
{
int *sdata = SharedMemory<int>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < size)
{
int acc=0;
accumulate(g_idata[i], i, n, acc);
mySum += acc;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (i + blockSize < size)
{
int acc2=0;
accumulate(g_idata[i+blockSize], i+blockSize, n, acc2);
mySum += acc2;
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the given reduction kernel
// For the kernels >= 3, we set threads / block to the minimum of maxThreads and
// n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel
// 6, we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size exceed the upper bound
hipDeviceProp_t prop;
int device;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
if (whichKernel == 6)
{
blocks = MIN(maxBlocks, blocks);
}
}
void reduce(int n, int threads, int blocks, float *d_idata, int *d_odata)
{
int size = n*n;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(int) : threads * sizeof(int);
switch (threads)
{
case 512:
hipLaunchKernelGGL(( reduce6< 512>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 256:
hipLaunchKernelGGL(( reduce6< 256>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 128:
hipLaunchKernelGGL(( reduce6< 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 64:
hipLaunchKernelGGL(( reduce6< 64>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 32:
hipLaunchKernelGGL(( reduce6< 32>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 16:
hipLaunchKernelGGL(( reduce6< 16>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 8:
hipLaunchKernelGGL(( reduce6< 8>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 4:
hipLaunchKernelGGL(( reduce6< 4>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 2:
hipLaunchKernelGGL(( reduce6< 2>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
case 1:
hipLaunchKernelGGL(( reduce6< 1>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size,n);
break;
}
}
int identity_matrix(float* d_mat, int n)
{
// Allocate memory for partial results
int *d_sum;
gpuErrchk(hipMalloc((void**)&d_sum, n*n* sizeof(int)))
// Set all partial results to 0
gpuErrchk(hipMemset(d_sum, 0,n*n* sizeof(int)))
// Define grid
int numBlocks=4;
int numThreads = 32;
//getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
getNumBlocksAndThreads(6, n*n, 100, 32, numBlocks, numThreads);
// Launches kernel
reduce(n, numThreads, numBlocks, d_mat, d_sum);
// Allocate block size of memory on host
int *h_sum = (int *)malloc(numBlocks* sizeof(int));
// Copy last block to host
gpuErrchk(hipMemcpy(h_sum, d_sum, numBlocks* sizeof(int), hipMemcpyDeviceToHost))
// Process last block
int ret =0;
for(int i=0;i<numBlocks;i++)
{
ret+= h_sum[i];
}
// Free allocated resources
hipFree(d_sum);
free(h_sum);
return !ret;
}
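/* Usage sketch added for illustration (not part of the original source; the
   self-test function name is hypothetical): build an n-by-n identity on the
   device and check that the reduction classifies it as the identity. */
static void identity_matrix_selftest(int n)
{
	float *d_mat = get_dev_identity_matrix(n);
	if (d_mat == NULL) {
		printf("device allocation failed\n");
		return;
	}
	// identity_matrix() returns 1 when no element violates the identity pattern
	printf("is identity: %d\n", identity_matrix(d_mat, n));
	hipFree(d_mat);
}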
| 05f700d66c5a2843d8fc9dc01ec46321faa20d88.cu | /* Matrix Inversion
* Group F: M. Lechner, P. Knöbel, J. Lövhall
*
* reduction to check if matrix is ID
*/
#include "includes.h"
/* Sets the diagonal to 1 */
__global__
void identity(int n, float *mat)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
{
if(j==i)
{
mat[i*n + j] = 1;
}
// printf("%d, %d\n",i,j);
}
}
/* Sets all values to 0.0f (because of float data-type memset does not work) */
__global__
void setzero(int n, float *mat)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n)
{
mat[i*n + j] = 0;
}
}
/* Accumulate function checks if current value and position matches to the definition
of the identity matrix, i.e. 1s in the diagonal, 0s elsewhere.
Returns 0 if value and position matches,
returns 1 if there is a conflict, i.e. matrix is not identity matrix */
__device__
void accumulate(float mat, int i, int n, int &ret)
{
int x=i/n;
int y = i%n;
if(y==x)
{
if(mat>1.1 || mat<0.9)
ret = 1;
else ret = 0;
}
else
{
if(mat>0.1 || mat < -0.1)
ret = 1;
else ret = 0;
}
}
/* Allocates identity matrix of dimension n by n on the device */
float* get_dev_identity_matrix(int n)
{
int size = n*n;
float *d_mat;
if(cudaMalloc((void **)&d_mat, size*sizeof(float)) != cudaSuccess)
{
return NULL;
}
if(cudaMemset(d_mat, 0, size*sizeof(float)) != cudaSuccess)
{
return NULL;
}
/* Let 16 by 16 threads run in parallel per block */
dim3 threadsPerBlock(16, 16);
int dimx = n / threadsPerBlock.x;
int dimy = n / threadsPerBlock.y;
/* Is n not divisible by 16 -> increment n by 1 to process the remaining elements */
if( n > dimx * threadsPerBlock.x)
dimx++;
if( n > dimy * threadsPerBlock.y)
dimy++;
dim3 numBlocks(dimx, dimy);
// Setting all to 0 and the diagonal to 1 is done with 2 kernels, because of caching and memory access
// Set all entries to 0 first, cannot use cudaMemset because of float (32 bit) data type
setzero<<<numBlocks, threadsPerBlock>>>(n, d_mat);
// Set the elements in the diagonal to 1
identity<<<numBlocks, threadsPerBlock>>>(n, d_mat);
return d_mat;
}
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T *()
{
extern __shared__ int __smem[];
return (T *)__smem;
}
__device__ inline operator const T *() const
{
extern __shared__ int __smem[];
return (T *)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double *()
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
__device__ inline operator const double *() const
{
extern __shared__ double __smem_d[];
return (double *)__smem_d;
}
};
template <unsigned int blockSize>
__global__ void
reduce6(float *g_idata, int *g_odata, int size, int n)
{
int *sdata = SharedMemory<int>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
int mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < size)
{
int acc=0;
accumulate(g_idata[i], i, n, acc);
mySum += acc;
// ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
if (i + blockSize < size)
{
int acc2=0;
accumulate(g_idata[i+blockSize], i+blockSize, n, acc2);
mySum += acc2;
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if ((blockSize >= 512) && (tid < 256))
{
sdata[tid] = mySum = mySum + sdata[tid + 256];
}
__syncthreads();
if ((blockSize >= 256) &&(tid < 128))
{
sdata[tid] = mySum = mySum + sdata[tid + 128];
}
__syncthreads();
if ((blockSize >= 128) && (tid < 64))
{
sdata[tid] = mySum = mySum + sdata[tid + 64];
}
__syncthreads();
#if (__CUDA_ARCH__ >= 300 )
if ( tid < 32 )
{
// Fetch final intermediate sum from 2nd warp
if (blockSize >= 64) mySum += sdata[tid + 32];
// Reduce final warp using shuffle
for (int offset = warpSize/2; offset > 0; offset /= 2)
{
mySum += __shfl_down(mySum, offset);
}
}
#else
// fully unroll reduction within a single warp
if ((blockSize >= 64) && (tid < 32))
{
sdata[tid] = mySum = mySum + sdata[tid + 32];
}
__syncthreads();
if ((blockSize >= 32) && (tid < 16))
{
sdata[tid] = mySum = mySum + sdata[tid + 16];
}
__syncthreads();
if ((blockSize >= 16) && (tid < 8))
{
sdata[tid] = mySum = mySum + sdata[tid + 8];
}
__syncthreads();
if ((blockSize >= 8) && (tid < 4))
{
sdata[tid] = mySum = mySum + sdata[tid + 4];
}
__syncthreads();
if ((blockSize >= 4) && (tid < 2))
{
sdata[tid] = mySum = mySum + sdata[tid + 2];
}
__syncthreads();
if ((blockSize >= 2) && ( tid < 1))
{
sdata[tid] = mySum = mySum + sdata[tid + 1];
}
__syncthreads();
#endif
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
#ifndef MIN
#define MIN(x,y) ((x < y) ? x : y)
#endif
unsigned int nextPow2(unsigned int x)
{
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
////////////////////////////////////////////////////////////////////////////////
// Compute the number of threads and blocks to use for the given reduction kernel
// For the kernels >= 3, we set threads / block to the minimum of maxThreads and
// n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel
// 6, we observe the maximum specified number of blocks, because each thread in
// that kernel can process a variable number of elements.
////////////////////////////////////////////////////////////////////////////////
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size exceed the upper bound
cudaDeviceProp prop;
int device;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> exceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
if (whichKernel == 6)
{
blocks = MIN(maxBlocks, blocks);
}
}
void reduce(int n, int threads, int blocks, float *d_idata, int *d_odata)
{
int size = n*n;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(int) : threads * sizeof(int);
switch (threads)
{
case 512:
reduce6< 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 256:
reduce6< 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 128:
reduce6< 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 64:
reduce6< 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 32:
reduce6< 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 16:
reduce6< 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 8:
reduce6< 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 4:
reduce6< 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 2:
reduce6< 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
case 1:
reduce6< 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size,n);
break;
}
}
int identity_matrix(float* d_mat, int n)
{
// Allocate memory for partial results
int *d_sum;
gpuErrchk(cudaMalloc((void**)&d_sum, n*n* sizeof(int)))
// Set all partial results to 0
gpuErrchk(cudaMemset(d_sum, 0,n*n* sizeof(int)))
// Define grid
int numBlocks=4;
int numThreads = 32;
//getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
getNumBlocksAndThreads(6, n*n, 100, 32, numBlocks, numThreads);
// Launches kernel
reduce(n, numThreads, numBlocks, d_mat, d_sum);
// Allocate block size of memory on host
int *h_sum = (int *)malloc(numBlocks* sizeof(int));
// Copy last block to host
gpuErrchk(cudaMemcpy(h_sum, d_sum, numBlocks* sizeof(int), cudaMemcpyDeviceToHost))
// Process last block
int ret =0;
for(int i=0;i<numBlocks;i++)
{
ret+= h_sum[i];
}
// Free allocated resources
cudaFree(d_sum);
free(h_sum);
return !ret;
}
|
df39e3176b6d16cfeebb0b283cd296f01bfd02b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <chrono>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/count.h>
using namespace std;
static const int BLOCK_SIZE = 256;
// Timer
class Timer {
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Clock;
long long count;
bool running;
Clock prev_start_;
Clock Now() {
return std::chrono::high_resolution_clock::now();
}
public:
void Start() {
running = true;
prev_start_ = Now();
}
void Pause() {
if (running) {
running = false;
auto diff = Now() - prev_start_;
count += std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count();
}
}
void Reset() {
running = false;
count = 0;
}
long long get_count() {
return count;
}
Timer() { Reset(); }
};
// for gen random vector 2
struct int2prg {
__host__ __device__
int2 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(-2, 1);
rng.discard(n);
return make_int2(dist(rng), dist(rng));
}
};
// for gen random vector 4
struct int4prg {
__host__ __device__
int4 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(-2, 1);
rng.discard(n);
return make_int4(dist(rng), dist(rng), dist(rng), dist(rng));
}
};
struct poseStep {
float4 s4;
float2 s2;
};
struct poseBound {
float2 tx;
float2 ty;
float2 tz;
float2 rx;
float2 rz0;
float2 rz1;
};
struct isValidTest {
__host__ __device__
bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
return (thrust::get<2>(a) == false);
};
};
__global__
void expand_kernel(float4* Poses4, float2* Poses2, const int numPoses, const int newSize) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 256 + tIdx;
if (Idx >= numPoses)
return;
for (int i = Idx + numPoses; i < newSize; i += numPoses) {
Poses4[i] = Poses4[Idx];
Poses2[i] = Poses2[Idx];
}
}
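// Perturbs each candidate pose by one random step per dimension. tx/ty steps
// are scaled by a depth/marker-extent weight, tz steps are uniform in inverse
// depth (1/tz), and rx steps are uniform in 1/(2 - sin(rx)); candidates whose
// tz or rx leave the configured bounds are flagged invalid.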
__global__
void add_kernel(float4* Poses4, float2* Poses2, int4* rand4, int2* rand2, bool* isValid,
const float4 s4, const float2 s2, const float2 btx, const float2 bty, const float2 btz,
const float2 brx, const float2 brz0, const float2 brz1, const float2 marker, const int numPoses, const int expandSize) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 256 + tIdx;
if (Idx >= expandSize)
return;
float isPlus;
// mem
float Otx = Poses4[Idx + numPoses].x;
float Oty = Poses4[Idx + numPoses].y;
float Otz = Poses4[Idx + numPoses].z;
float Orx = Poses4[Idx + numPoses].w;
float Orz0 = Poses2[Idx + numPoses].x;
float Orz1 = Poses2[Idx + numPoses].y;
// tx ty
float weight = Otz + sqrtf(marker.x*marker.x + marker.y*marker.y) * sinf(Orx);
Poses4[Idx + numPoses].x = Otx + float(rand4[Idx].x) * weight * s4.x;
Poses4[Idx + numPoses].y = Oty + float(rand4[Idx].y) * weight * s4.y;
// tz
isPlus = float(rand4[Idx].z);
float vtz = 1 - isPlus * s4.z * Otz;
Poses4[Idx + numPoses].z = Otz + isPlus * s4.z * (Otz * Otz) / vtz;
// rx
isPlus = float(rand4[Idx].w);
float sinrx = 2 - 1/(1/(2 - sinf(Orx)) + isPlus*s4.w);
Poses4[Idx + numPoses].w = Orx + isPlus * isPlus * (asinf(sinrx) - Orx);
// rz0 rz1
weight = sqrtf(btz.x * btz.y);
Poses2[Idx + numPoses].x = Orz0 + float(rand2[Idx].x)*s2.x*weight;
Poses2[Idx + numPoses].y = Orz1 + float(rand2[Idx].y)*s2.y*weight;
// condition
isValid[Idx + numPoses] = (vtz != 0) & (abs(sinrx) <= 1) & (Poses4[Idx + numPoses].z >= btz.x) & (Poses4[Idx + numPoses].z <= btz.y) & (Poses4[Idx + numPoses].w >= brx.x) & (Poses4[Idx + numPoses].w <= brx.y);
}
void randVector(thrust::device_vector<int4>* rand4, thrust::device_vector<int2>* rand2, const int& num) {
thrust::counting_iterator<int> i04(0);
thrust::counting_iterator<int> i02(22);
thrust::transform(i04, i04 + num, rand4->begin(), int4prg());
thrust::transform(i02, i02 + num, rand2->begin(), int2prg());
}
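// Expands every surviving pose into 80 finer candidates (keeping the original),
// shrinking all step sizes by 'factor', then stream-compacts away candidates
// flagged invalid by add_kernel.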
void expandPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2,
const float& factor, poseStep* step, const poseBound bound, const float2 marker, int* numPoses) {
// number of expand points
const int numPoints = 80;
int expandSize = (*numPoses) * numPoints;
int newSize = (*numPoses) * (numPoints + 1);
// decrease step
step->s4.x /= factor;
step->s4.y /= factor;
step->s4.z /= factor;
step->s4.w /= factor;
step->s2.x /= factor;
step->s2.y /= factor;
// gen random set
thrust::device_vector<int4> rand4(expandSize);
thrust::device_vector<int2> rand2(expandSize);
randVector(&rand4, &rand2, expandSize);
// expand origin set
const int BLOCK_NUM0 = ((*numPoses) - 1) / 256 + 1;
Poses4->resize(newSize);
Poses2->resize(newSize);
	hipLaunchKernelGGL(( expand_kernel), dim3(BLOCK_NUM0), dim3(256), 0, 0, thrust::raw_pointer_cast(Poses4->data()), thrust::raw_pointer_cast(Poses2->data()), *numPoses, newSize);
// add finer delta
const int BLOCK_NUM1 = (expandSize - 1) / 256 + 1;
thrust::device_vector<bool> isValid(newSize, true);
hipLaunchKernelGGL(( add_kernel) , dim3(BLOCK_NUM1), dim3(256) , 0, 0, thrust::raw_pointer_cast(Poses4->data()), thrust::raw_pointer_cast(Poses2->data()),
thrust::raw_pointer_cast(rand4.data()), thrust::raw_pointer_cast(rand2.data()),
thrust::raw_pointer_cast(isValid.data()), step->s4, step->s2,
bound.tx, bound.ty, bound.tz, bound.rx, bound.rz0, bound.rz1, marker, *numPoses, expandSize);
// remove invalid
typedef thrust::tuple< thrust::device_vector< float4 >::iterator, thrust::device_vector< float2 >::iterator, thrust::device_vector< bool >::iterator > TupleIt;
typedef thrust::zip_iterator< TupleIt > ZipIt;
ZipIt Zend = thrust::remove_if(
thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isValid.begin())),
thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isValid.end())),
isValidTest()
);
Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());
*numPoses = Poses4->size();
}
int main() {
// read poses
const float delta = 0.25;
int numPoses = 25887;
int numPoints = 80;
float4 *Pose4 = new float4[numPoses];
float2 *Pose2 = new float2[numPoses];
int4 *rand4 = new int4[numPoses*numPoints];
int2 *rand2 = new int2[numPoses*numPoints];
ifstream inFile("poses.txt");
if (!inFile)
return 0;
for (int i = 0; i < numPoses; i++) {
inFile >> Pose4[i].x;
inFile >> Pose4[i].y;
inFile >> Pose4[i].z;
inFile >> Pose4[i].w;
inFile >> Pose2[i].x;
inFile >> Pose2[i].y;
}
inFile.close();
cout << "read pose complete!" << endl;
// read rand
//ifstream inFile1("rand.txt");
//if (!inFile1)
// return 0;
//for (int i = 0; i < numPoses*numPoints; i++) {
// float tmp;
// inFile1 >> tmp;
// rand4[i].x = int(tmp);
// inFile1 >> tmp;
// rand4[i].y = int(tmp);
// inFile1 >> tmp;
// rand4[i].z = int(tmp);
// inFile1 >> tmp;
// rand4[i].w = int(tmp);
// inFile1 >> tmp;
// rand2[i].x = int(tmp);
// inFile1 >> tmp;
// rand2[i].y = int(tmp);
//}
//inFile1.close();
//cout << "read rand complete!" << endl;
cout << "original " << numPoses << " poses." << endl;
// create parameter
poseStep step;
poseBound bound;
float2 marker;
step.s4.x = 0.036084;
step.s4.y = 0.036084;
step.s4.z = 0.036084;
step.s4.w = 0.036084;
step.s2.x = 0.072169;
step.s2.y = 0.072169;
bound.tx = make_float2(-2.7, 2.7);
bound.ty = make_float2(-1.9, 1.9);
bound.tz = make_float2(3, 8);
bound.rx = make_float2(0, 1.3963);
bound.rz0 = make_float2(-3.1416, 3.1416);
bound.rz1 = make_float2(-3.1416, 3.1416);
marker.x = 0.6667;
marker.y = 0.5;
// load to gpu
thrust::device_vector<float4> Poses4(Pose4, Pose4 + numPoses);
thrust::device_vector<float2> Poses2(Pose2, Pose2 + numPoses);
thrust::device_vector<int4> rands4(rand4, rand4 + numPoses*numPoints);
thrust::device_vector<int2> rands2(rand2, rand2 + numPoses*numPoints);
Timer timer;
timer.Reset(); timer.Start();
expandPoses(&Poses4, &Poses2, 1.511, &step, bound, marker, &numPoses);
//expandPoses(&Poses4, &Poses2, 1.511, &step, bound, marker, &numPoses, &rands4, &rands2);
cout << hipGetErrorString(hipGetLastError()) << endl;
timer.Pause();
cout << "Time: " << timer.get_count() << " ns." << endl;
cout << "now " << numPoses << " poses." << endl;
ofstream outFile("poses1Cuda.txt");
if (!outFile)
return 0;
for (int i = 0; i < numPoses; i++) {
float4 p4 = Poses4[i];
float2 p2 = Poses2[i];
outFile << p4.x << " " << p4.y << " " << p4.z << " " << p4.w << " ";
outFile << p2.x << " " << p2.y << endl;
}
outFile.close();
delete[] Pose4;
delete[] Pose2;
delete[] rand4;
delete[] rand2;
return 0;
}
| df39e3176b6d16cfeebb0b283cd296f01bfd02b9.cu | #include <iostream>
#include <fstream>
#include <chrono>
#include <math.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include <thrust/count.h>
using namespace std;
static const int BLOCK_SIZE = 256;
// Timer
class Timer {
typedef std::chrono::time_point<std::chrono::high_resolution_clock> Clock;
long long count;
bool running;
Clock prev_start_;
Clock Now() {
return std::chrono::high_resolution_clock::now();
}
public:
void Start() {
running = true;
prev_start_ = Now();
}
void Pause() {
if (running) {
running = false;
auto diff = Now() - prev_start_;
count += std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count();
}
}
void Reset() {
running = false;
count = 0;
}
long long get_count() {
return count;
}
Timer() { Reset(); }
};
// for gen random vector 2
struct int2prg {
__host__ __device__
int2 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(-2, 1);
rng.discard(n);
return make_int2(dist(rng), dist(rng));
}
};
// for gen random vector 4
struct int4prg {
__host__ __device__
int4 operator()(const int n) const {
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist(-2, 1);
rng.discard(n);
return make_int4(dist(rng), dist(rng), dist(rng), dist(rng));
}
};
struct poseStep {
float4 s4;
float2 s2;
};
struct poseBound {
float2 tx;
float2 ty;
float2 tz;
float2 rx;
float2 rz0;
float2 rz1;
};
struct isValidTest {
__host__ __device__
bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
return (thrust::get<2>(a) == false);
};
};
__global__
void expand_kernel(float4* Poses4, float2* Poses2, const int numPoses, const int newSize) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 256 + tIdx;
if (Idx >= numPoses)
return;
for (int i = Idx + numPoses; i < newSize; i += numPoses) {
Poses4[i] = Poses4[Idx];
Poses2[i] = Poses2[Idx];
}
}
__global__
void add_kernel(float4* Poses4, float2* Poses2, int4* rand4, int2* rand2, bool* isValid,
const float4 s4, const float2 s2, const float2 btx, const float2 bty, const float2 btz,
const float2 brx, const float2 brz0, const float2 brz1, const float2 marker, const int numPoses, const int expandSize) {
const int tIdx = threadIdx.x;
const int Idx = blockIdx.x * 256 + tIdx;
if (Idx >= expandSize)
return;
float isPlus;
// mem
float Otx = Poses4[Idx + numPoses].x;
float Oty = Poses4[Idx + numPoses].y;
float Otz = Poses4[Idx + numPoses].z;
float Orx = Poses4[Idx + numPoses].w;
float Orz0 = Poses2[Idx + numPoses].x;
float Orz1 = Poses2[Idx + numPoses].y;
// tx ty
float weight = Otz + sqrtf(marker.x*marker.x + marker.y*marker.y) * sinf(Orx);
Poses4[Idx + numPoses].x = Otx + float(rand4[Idx].x) * weight * s4.x;
Poses4[Idx + numPoses].y = Oty + float(rand4[Idx].y) * weight * s4.y;
// tz
isPlus = float(rand4[Idx].z);
float vtz = 1 - isPlus * s4.z * Otz;
Poses4[Idx + numPoses].z = Otz + isPlus * s4.z * (Otz * Otz) / vtz;
// rx
isPlus = float(rand4[Idx].w);
float sinrx = 2 - 1/(1/(2 - sinf(Orx)) + isPlus*s4.w);
Poses4[Idx + numPoses].w = Orx + isPlus * isPlus * (asinf(sinrx) - Orx);
// rz0 rz1
weight = sqrtf(btz.x * btz.y);
Poses2[Idx + numPoses].x = Orz0 + float(rand2[Idx].x)*s2.x*weight;
Poses2[Idx + numPoses].y = Orz1 + float(rand2[Idx].y)*s2.y*weight;
// condition
isValid[Idx + numPoses] = (vtz != 0) & (abs(sinrx) <= 1) & (Poses4[Idx + numPoses].z >= btz.x) & (Poses4[Idx + numPoses].z <= btz.y) & (Poses4[Idx + numPoses].w >= brx.x) & (Poses4[Idx + numPoses].w <= brx.y);
}
void randVector(thrust::device_vector<int4>* rand4, thrust::device_vector<int2>* rand2, const int& num) {
thrust::counting_iterator<int> i04(0);
thrust::counting_iterator<int> i02(22);
thrust::transform(i04, i04 + num, rand4->begin(), int4prg());
thrust::transform(i02, i02 + num, rand2->begin(), int2prg());
}
void expandPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2,
const float& factor, poseStep* step, const poseBound bound, const float2 marker, int* numPoses) {
// number of expand points
const int numPoints = 80;
int expandSize = (*numPoses) * numPoints;
int newSize = (*numPoses) * (numPoints + 1);
// decrease step
step->s4.x /= factor;
step->s4.y /= factor;
step->s4.z /= factor;
step->s4.w /= factor;
step->s2.x /= factor;
step->s2.y /= factor;
// gen random set
thrust::device_vector<int4> rand4(expandSize);
thrust::device_vector<int2> rand2(expandSize);
randVector(&rand4, &rand2, expandSize);
// expand origin set
const int BLOCK_NUM0 = ((*numPoses) - 1) / 256 + 1;
Poses4->resize(newSize);
Poses2->resize(newSize);
expand_kernel << < BLOCK_NUM0, 256 >> > (thrust::raw_pointer_cast(Poses4->data()), thrust::raw_pointer_cast(Poses2->data()), *numPoses, newSize);
// add finer delta
const int BLOCK_NUM1 = (expandSize - 1) / 256 + 1;
thrust::device_vector<bool> isValid(newSize, true);
add_kernel <<< BLOCK_NUM1, 256 >>> (thrust::raw_pointer_cast(Poses4->data()), thrust::raw_pointer_cast(Poses2->data()),
thrust::raw_pointer_cast(rand4.data()), thrust::raw_pointer_cast(rand2.data()),
thrust::raw_pointer_cast(isValid.data()), step->s4, step->s2,
bound.tx, bound.ty, bound.tz, bound.rx, bound.rz0, bound.rz1, marker, *numPoses, expandSize);
// remove invalid
typedef thrust::tuple< thrust::device_vector< float4 >::iterator, thrust::device_vector< float2 >::iterator, thrust::device_vector< bool >::iterator > TupleIt;
typedef thrust::zip_iterator< TupleIt > ZipIt;
ZipIt Zend = thrust::remove_if(
thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isValid.begin())),
thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isValid.end())),
isValidTest()
);
Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());
*numPoses = Poses4->size();
}
int main() {
// read poses
const float delta = 0.25;
int numPoses = 25887;
int numPoints = 80;
float4 *Pose4 = new float4[numPoses];
float2 *Pose2 = new float2[numPoses];
int4 *rand4 = new int4[numPoses*numPoints];
int2 *rand2 = new int2[numPoses*numPoints];
ifstream inFile("poses.txt");
if (!inFile)
return 0;
for (int i = 0; i < numPoses; i++) {
inFile >> Pose4[i].x;
inFile >> Pose4[i].y;
inFile >> Pose4[i].z;
inFile >> Pose4[i].w;
inFile >> Pose2[i].x;
inFile >> Pose2[i].y;
}
inFile.close();
cout << "read pose complete!" << endl;
// read rand
//ifstream inFile1("rand.txt");
//if (!inFile1)
// return 0;
//for (int i = 0; i < numPoses*numPoints; i++) {
// float tmp;
// inFile1 >> tmp;
// rand4[i].x = int(tmp);
// inFile1 >> tmp;
// rand4[i].y = int(tmp);
// inFile1 >> tmp;
// rand4[i].z = int(tmp);
// inFile1 >> tmp;
// rand4[i].w = int(tmp);
// inFile1 >> tmp;
// rand2[i].x = int(tmp);
// inFile1 >> tmp;
// rand2[i].y = int(tmp);
//}
//inFile1.close();
//cout << "read rand complete!" << endl;
cout << "original " << numPoses << " poses." << endl;
// create parameter
poseStep step;
poseBound bound;
float2 marker;
step.s4.x = 0.036084;
step.s4.y = 0.036084;
step.s4.z = 0.036084;
step.s4.w = 0.036084;
step.s2.x = 0.072169;
step.s2.y = 0.072169;
bound.tx = make_float2(-2.7, 2.7);
bound.ty = make_float2(-1.9, 1.9);
bound.tz = make_float2(3, 8);
bound.rx = make_float2(0, 1.3963);
bound.rz0 = make_float2(-3.1416, 3.1416);
bound.rz1 = make_float2(-3.1416, 3.1416);
marker.x = 0.6667;
marker.y = 0.5;
// load to gpu
thrust::device_vector<float4> Poses4(Pose4, Pose4 + numPoses);
thrust::device_vector<float2> Poses2(Pose2, Pose2 + numPoses);
thrust::device_vector<int4> rands4(rand4, rand4 + numPoses*numPoints);
thrust::device_vector<int2> rands2(rand2, rand2 + numPoses*numPoints);
Timer timer;
timer.Reset(); timer.Start();
expandPoses(&Poses4, &Poses2, 1.511, &step, bound, marker, &numPoses);
//expandPoses(&Poses4, &Poses2, 1.511, &step, bound, marker, &numPoses, &rands4, &rands2);
cout << cudaGetErrorString(cudaGetLastError()) << endl;
timer.Pause();
cout << "Time: " << timer.get_count() << " ns." << endl;
cout << "now " << numPoses << " poses." << endl;
ofstream outFile("poses1Cuda.txt");
if (!outFile)
return 0;
for (int i = 0; i < numPoses; i++) {
float4 p4 = Poses4[i];
float2 p2 = Poses2[i];
outFile << p4.x << " " << p4.y << " " << p4.z << " " << p4.w << " ";
outFile << p2.x << " " << p2.y << endl;
}
outFile.close();
delete[] Pose4;
delete[] Pose2;
delete[] rand4;
delete[] rand2;
return 0;
}
|
260a35891989bb5c6fe52a6bca94f361ed0438f7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Implements the file command for jim
*
* (c) 2008 Steve Bennett <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Jim Tcl Project.
*
* Based on code originally from Tcl 6.7:
*
* Copyright 1987-1991 Regents of the University of California
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies. The University of California
* makes no representations about the suitability of this
* software for any purpose. It is provided "as is" without
* express or implied warranty.
*/
#include <limits.h>
#include <stdlibcu.h>
#include <stringcu.h>
#include <stdiocu.h>
#include <errnocu.h>
#include <sys/statcu.h>
#include "jimautoconf.h"
#include "jim-subcmd.h"
#ifdef HAVE_UTIMES
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistdcu.h>
#elif defined(_MSC_VER)
#include <direct.h>
#define F_OK 0
#define W_OK 2
#define R_OK 4
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
#ifndef MAXPATHLEN
#define MAXPATHLEN JIM_PATH_LEN
#endif
#if defined(__MINGW32__) || defined(_MSC_VER)
#define ISWINDOWS 1
#define HAVE_MKDIR_ONE_ARG
#else
#define ISWINDOWS 0
#endif
// JimGetFileType --
// Given a mode word, returns a string identifying the type of a file.
// Results:
// A static text string giving the file type from mode.
// Side effects:
// None.
//
static __device__ const char *JimGetFileType(int mode)
{
if (S_ISREG(mode)) return "file";
else if (S_ISDIR(mode)) return "directory";
#ifdef S_ISCHR
else if (S_ISCHR(mode)) return "characterSpecial";
#endif
#ifdef S_ISBLK
else if (S_ISBLK(mode)) return "blockSpecial";
#endif
#ifdef S_ISFIFO
else if (S_ISFIFO(mode)) return "fifo";
#endif
#ifdef S_ISLNK
else if (S_ISLNK(mode)) return "link";
#endif
#ifdef S_ISSOCK
else if (S_ISSOCK(mode)) return "socket";
#endif
return "unknown";
}
// StoreStatData --
// This is a utility procedure that breaks out the fields of a "stat" structure and stores them in textual form into the
// elements of an associative array.
//
// Results:
// Returns a standard Tcl return value. If an error occurs then a message is left in interp->result.
//
// Side effects:
// Elements of the associative array given by "varName" are modified.
static __device__ void AppendStatElement(Jim_Interp *interp, Jim_Obj *listObj, const char *key, jim_wide value)
{
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, key, -1));
Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, value));
}
static __device__ int StoreStatData(Jim_Interp *interp, Jim_Obj *varName, const struct stat *sb)
{
// Just use a list to store the data
Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0);
#if !__HIPCC__
AppendStatElement(interp, listObj, "dev", sb->st_dev);
AppendStatElement(interp, listObj, "ino", sb->st_ino);
#endif
AppendStatElement(interp, listObj, "mode", sb->st_mode);
#if !__HIPCC__
AppendStatElement(interp, listObj, "nlink", sb->st_nlink);
#endif
AppendStatElement(interp, listObj, "uid", sb->st_uid);
AppendStatElement(interp, listObj, "gid", sb->st_gid);
AppendStatElement(interp, listObj, "size", sb->st_size);
AppendStatElement(interp, listObj, "atime", sb->st_atime);
AppendStatElement(interp, listObj, "mtime", sb->st_mtime);
AppendStatElement(interp, listObj, "ctime", sb->st_ctime);
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "type", -1));
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, JimGetFileType((int)sb->st_mode), -1));
// Was a variable specified?
if (varName) {
Jim_Obj *objPtr = Jim_GetVariable(interp, varName, JIM_NONE);
if (objPtr) {
if (Jim_DictSize(interp, objPtr) < 0) {
// This message matches the one from Tcl
Jim_SetResultFormatted(interp, "can't set \"%#s(dev)\": variable isn't array", varName);
Jim_FreeNewObj(interp, listObj);
return JIM_ERROR;
}
if (Jim_IsShared(objPtr))
objPtr = Jim_DuplicateObj(interp, objPtr);
// Just cheat here and append as a list and convert to a dict
Jim_ListAppendList(interp, objPtr, listObj);
Jim_DictSize(interp, objPtr);
Jim_InvalidateStringRep(objPtr);
Jim_FreeNewObj(interp, listObj);
listObj = objPtr;
}
Jim_SetVariable(interp, varName, listObj);
}
// And also return the value
Jim_SetResult(interp, listObj);
return JIM_OK;
}
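// Result-shape sketch (editor's note, based on the code above): the handler builds
// a flat key/value list such as
//   mode 33188 uid 1000 gid 1000 size 1234 atime ... mtime ... ctime ... type file
// and, when a variable name is supplied, merges it into that variable as a dict so
// a script can read individual fields, e.g. (hypothetical file name)
//   file stat foo.txt s
//   puts $s(size)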
static __device__ int file_cmd_dirname(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *p = strrchr((char *)path, '/');
if (!p && path[0] == '.' && path[1] == '.' && path[2] == '\0') Jim_SetResultString(interp, "..", -1);
else if (!p) Jim_SetResultString(interp, ".", -1);
else if (p == path) Jim_SetResultString(interp, "/", -1);
else if (ISWINDOWS && p[-1] == ':') Jim_SetResultString(interp, path, (int)(p - path) + 1); // z:/dir => z:/
else Jim_SetResultString(interp, path, (int)(p - path));
return JIM_OK;
}
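// Worked cases for the branches above (editor's illustration; paths hypothetical):
//   file dirname ..      -> ".."
//   file dirname foo     -> "."
//   file dirname /foo    -> "/"
//   file dirname z:/dir  -> "z:/"   (drive root kept, ISWINDOWS builds only)
//   file dirname a/b/c   -> "a/b"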
static __device__ int file_cmd_rootname(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
const char *p = strrchr((char *)path, '.');
if (p == NULL || (lastSlash != NULL && lastSlash > p))
Jim_SetResult(interp, argv[0]);
else
Jim_SetResultString(interp, path, (int)(p - path));
return JIM_OK;
}
static __device__ int file_cmd_extension(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
const char *p = strrchr((char *)path, '.');
if (p == NULL || (lastSlash != NULL && lastSlash >= p))
p = "";
Jim_SetResultString(interp, p, -1);
return JIM_OK;
}
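// Worked cases for the two helpers above (editor's illustration):
//   file rootname a/b.c.d -> "a/b.c"     file extension a/b.c.d -> ".d"
//   file rootname a.b/c   -> "a.b/c"     file extension a.b/c   -> ""
// A dot that precedes the last slash belongs to a directory name, so it is not
// treated as the start of an extension.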
static __device__ int file_cmd_tail(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
if (lastSlash)
Jim_SetResultString(interp, lastSlash + 1, -1);
else
Jim_SetResult(interp, argv[0]);
return JIM_OK;
}
static __device__ int file_cmd_normalize(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#ifdef HAVE_REALPATH
const char *path = Jim_String(argv[0]);
char *newname = Jim_Alloc(MAXPATHLEN + 1);
if (realpath(path, newname)) {
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, -1));
return JIM_OK;
}
else {
Jim_Free(newname);
Jim_SetResultFormatted(interp, "can't normalize \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
#else
Jim_SetResultString(interp, "Not implemented", -1);
return JIM_ERROR;
#endif
}
static __device__ int file_cmd_join(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
char *newname = (char *)Jim_Alloc(MAXPATHLEN + 1);
char *last = newname;
*newname = 0;
// Simple implementation for now
for (int i = 0; i < argc; i++) {
int len;
const char *part = Jim_GetString(argv[i], &len);
// Absolute component, so go back to the start
if (*part == '/')
last = newname;
// Absolute component on mingw, so go back to the start
else if (ISWINDOWS && strchr(part, ':'))
last = newname;
else if (part[0] == '.') {
if (part[1] == '/') { part += 2; len -= 2; }
// Adding '.' to an existing path does nothing
else if (part[1] == 0 && last != newname) continue;
}
// Add a slash if needed
if (last != newname && last[-1] != '/')
*last++ = '/';
if (len) {
if (last + len - newname >= MAXPATHLEN) {
Jim_Free(newname);
Jim_SetResultString(interp, "Path too long", -1);
return JIM_ERROR;
}
memcpy(last, part, len);
last += len;
}
// Remove a slash if needed
if (last > newname + 1 && last[-1] == '/')
if (!ISWINDOWS || !(last > newname + 2 && last[-2] == ':')) // but on Windows, leave the trailing slash on "c:/"
*--last = 0;
}
*last = 0;
// Probably need to handle some special cases ...
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, (int)(last - newname)));
return JIM_OK;
}
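// Behaviour sketch for the simple join above (editor's illustration):
//   file join /tmp foo bar -> "/tmp/foo/bar"
//   file join a /b c       -> "/b/c"   (an absolute component restarts the path)
//   file join a . b        -> "a/b"    (a bare "." contributes nothing)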
static __device__ int file_access(Jim_Interp *interp, Jim_Obj *filename, int mode)
{
Jim_SetResultBool(interp, access(Jim_String(filename), mode) != -1);
return JIM_OK;
}
static __device__ int file_cmd_readable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], R_OK);
}
static __device__ int file_cmd_writable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], W_OK);
}
static __device__ int file_cmd_executable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#ifdef X_OK
return file_access(interp, argv[0], X_OK);
#else
// If no X_OK, just assume true
Jim_SetResultBool(interp, 1);
return JIM_OK;
#endif
}
static __device__ int file_cmd_exists(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], F_OK);
}
static __device__ int file_cmd_delete(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int force = Jim_CompareStringImmediate(interp, argv[0], "-force");
if (force || Jim_CompareStringImmediate(interp, argv[0], "--")) {
argc--; // skip the leading -force / -- option
argv++;
}
while (argc--) {
const char *path = Jim_String(argv[0]);
if (unlink(path) == -1 && errno != ENOENT) {
if (rmdir(path) == -1)
if (!force || Jim_EvalPrefix(interp, "file delete force", 1, argv) != JIM_OK) { // Maybe try using the script helper
Jim_SetResultFormatted(interp, "couldn't delete file \"%s\": %s", path, strerror(errno));
return JIM_ERROR;
}
}
argv++;
}
return JIM_OK;
}
#if defined(HAVE_MKDIR_ONE_ARG) && !defined(__CUDA_ARCH__)
#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME)
#else
#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME, 0755)
#endif
// Create directory, creating all intermediate paths if necessary.
// Returns 0 if OK or -1 on failure (and sets errno)
// Note: The path may be modified.
static __device__ int mkdir_all(char *path)
{
int ok = 1;
// First time just try to make the dir
goto first;
while (ok--) {
// Must have failed the first time, so recursively make the parent and try again
{
char *slash = strrchr(path, '/');
if (slash && slash != path) {
*slash = 0;
if (mkdir_all(path) != 0)
return -1;
*slash = '/';
}
}
first:
if (MKDIR_DEFAULT(path) == 0)
return 0;
if (errno == ENOENT)
continue; // Create the parent and try again
// Maybe it already exists as a directory
if (errno == EEXIST) {
struct stat sb;
if (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode))
return 0;
// Restore errno
errno = EEXIST;
}
// Failed
break;
}
return -1;
}
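// Editor's note: for a path like "a/b/c" the code first tries mkdir("a/b/c"); on
// ENOENT it temporarily truncates at the last '/' to recurse on "a/b", then retries
// exactly once. EEXIST is treated as success only if the existing entry is a
// directory, which makes repeated calls idempotent.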
static __device__ int file_cmd_mkdir(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
while (argc--) {
char *path = Jim_StrDup(Jim_String(argv[0]));
int rc = mkdir_all(path);
Jim_Free(path);
if (rc != 0) {
Jim_SetResultFormatted(interp, "can't create directory \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
argv++;
}
return JIM_OK;
}
static __device__ int file_cmd_tempfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int fd = Jim_MakeTempFile(interp, (argc >= 1) ? Jim_String(argv[0]) : NULL);
if (fd < 0)
return JIM_ERROR;
close(fd);
return JIM_OK;
}
static __device__ int file_cmd_rename(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int force = 0;
if (argc == 3) {
if (!Jim_CompareStringImmediate(interp, argv[0], "-force"))
return -1;
force++;
argv++;
argc--;
}
const char *source = Jim_String(argv[0]);
const char *dest = Jim_String(argv[1]);
if (!force && access(dest, F_OK) == 0) {
Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": target exists", argv[0], argv[1]);
return JIM_ERROR;
}
if (rename(source, dest) != 0) {
Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": %s", argv[0], argv[1], strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#if defined(HAVE_LINK) && defined(HAVE_SYMLINK)
static const char * const _link_options[] = { "-hard", "-symbolic", NULL };
static __device__ int file_cmd_link(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
enum { OPT_HARD, OPT_SYMBOLIC, };
int option = OPT_HARD;
if (argc == 3) {
if (Jim_GetEnum(interp, argv[0], _link_options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) != JIM_OK)
return JIM_ERROR;
argv++;
argc--;
}
const char *dest = Jim_String(argv[0]);
const char *source = Jim_String(argv[1]);
int ret = (option == OPT_HARD ? link(source, dest) : symlink(source, dest));
if (ret != 0) {
Jim_SetResultFormatted(interp, "error linking \"%#s\" to \"%#s\": %s", argv[0], argv[1], strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#endif
static __device__ int file_stat(Jim_Interp *interp, Jim_Obj *filename, struct stat *sb)
{
const char *path = Jim_String(filename);
if (stat(path, sb) == -1) {
Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#ifdef HAVE_LSTAT
static __device__ int file_lstat(Jim_Interp *interp, Jim_Obj *filename, struct _stat *sb)
{
const char *path = Jim_String(filename);
if (_lstat(path, sb) == -1) {
Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#else
#define file_lstat file_stat
#endif
static __device__ int file_cmd_atime(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_atime);
return JIM_OK;
}
static __device__ int file_cmd_mtime(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
if (argc == 2) {
#ifdef HAVE_UTIMES
jim_wide newtime;
struct timeval times[2];
if (Jim_GetWide(interp, argv[1], &newtime) != JIM_OK)
return JIM_ERROR;
times[1].tv_sec = times[0].tv_sec = newtime;
times[1].tv_usec = times[0].tv_usec = 0;
if (utimes(Jim_String(argv[0]), times) != 0) {
Jim_SetResultFormatted(interp, "can't set time on \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
#else
Jim_SetResultString(interp, "Not implemented", -1);
return JIM_ERROR;
#endif
}
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_mtime);
return JIM_OK;
}
static __device__ int file_cmd_copy(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return Jim_EvalPrefix(interp, "file copy", argc, argv);
}
static __device__ int file_cmd_size(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_size);
return JIM_OK;
}
static __device__ int file_cmd_isdirectory(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = S_ISDIR(sb.st_mode);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
static __device__ int file_cmd_isfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = S_ISREG(sb.st_mode);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
#ifdef HAVE_GETEUID
static __device__ int file_cmd_owned(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct _stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = (geteuid() == sb.st_uid);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
#endif
#if defined(HAVE_READLINK)
static __device__ int file_cmd_readlink(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
char *linkValue = Jim_Alloc(MAXPATHLEN + 1);
int linkLength = readlink(path, linkValue, MAXPATHLEN);
if (linkLength == -1) {
Jim_Free(linkValue);
Jim_SetResultFormatted(interp, "couldn't readlink \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
linkValue[linkLength] = 0;
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, linkValue, linkLength));
return JIM_OK;
}
#endif
static __device__ int file_cmd_type(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_lstat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultString(interp, JimGetFileType((int)sb.st_mode), -1);
return JIM_OK;
}
#ifdef HAVE_LSTAT
static __device__ int file_cmd_lstat(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct _stat sb;
if (file_lstat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
return StoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb);
}
#else
#define file_cmd_lstat file_cmd_stat
#endif
static __device__ int file_cmd_stat(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
return StoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb);
}
__constant__ static const jim_subcmd_type _file_command_table[] = {
{ "atime", "name", file_cmd_atime, 1, 1 }, // Description: Last access time
{ "mtime", "name ?time?", file_cmd_mtime, 1, 2 }, // Description: Get or set last modification time
{ "copy", "?-force? source dest", file_cmd_copy, 2, 3 }, // Description: Copy source file to destination file
{ "dirname", "name", file_cmd_dirname, 1, 1, }, // Description: Directory part of the name
{ "rootname", "name", file_cmd_rootname, 1, 1 }, // Description: Name without any extension
{ "extension", "name", file_cmd_extension, 1, 1, }, // Description: Last extension including the dot
{ "tail", "name", file_cmd_tail, 1, 1 }, // Description: Last component of the name
{ "normalize", "name", file_cmd_normalize, 1, 1 }, // Description: Normalized path of name
{ "join", "name ?name ...?", file_cmd_join, 1, -1 }, // Description: Join multiple path components
{ "readable", "name", file_cmd_readable, 1, 1 }, // Description: Is file readable
{ "writable", "name", file_cmd_writable, 1, 1 }, // Description: Is file writable
{ "executable", "name", file_cmd_executable, 1, 1 }, // Description: Is file executable
{ "exists", "name", file_cmd_exists, 1, 1 }, // Description: Does file exist
{ "delete", "?-force|--? name ...", file_cmd_delete, 1, -1 }, // Description: Deletes the files or directories (must be empty unless -force)
{ "mkdir", "dir ...", file_cmd_mkdir, 1, -1 }, // Description: Creates the directories
{ "tempfile", "?template?", file_cmd_tempfile, 0, 1 }, // Description: Creates a temporary filename
{ "rename", "?-force? source dest", file_cmd_rename, 2, 3 }, // Description: Renames a file
#if defined(HAVE_LINK) && defined(HAVE_SYMLINK)
{ "link", "?-symbolic|-hard? newname target", file_cmd_link, 2, 3 }, // Description: Creates a hard or soft link
#endif
#if defined(HAVE_READLINK)
{ "readlink", "name", file_cmd_readlink, 1, 1 }, // Description: Value of the symbolic link
#endif
{ "size", "name", file_cmd_size, 1, 1 }, // Description: Size of file
{ "stat", "name ?var?", file_cmd_stat, 1, 2 }, // Description: Returns results of stat, and may store in var array
{ "lstat", "name ?var?", file_cmd_lstat, 1, 2 }, // Description: Returns results of lstat, and may store in var array
{ "type", "name", file_cmd_type, 1, 1 }, // Description: Returns type of the file
#ifdef HAVE_GETEUID
{ "owned", "name", file_cmd_owned, 1, 1 }, // Description: Returns 1 if owned by the current owner
#endif
{ "isdirectory", "name", file_cmd_isdirectory, 1, 1 }, // Description: Returns 1 if name is a directory
{ "isfile", "name", file_cmd_isfile, 1, 1 }, // Description: Returns 1 if name is a file
{ NULL }
};
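// Editor's note (assuming the standard jim-subcmd dispatch): each row above becomes
// a "file <subcmd>" form, with the trailing min/max argument counts checked before
// the handler runs, e.g. (hypothetical paths)
//   file mkdir build/obj
//   file exists build/obj   ;# -> 1
//   file size build/obj/a.o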
static __device__ int Jim_CdCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
if (argc != 2) {
Jim_WrongNumArgs(interp, 1, argv, "dirname");
return JIM_ERROR;
}
const char *path = Jim_String(argv[1]);
if (chdir(path) != 0) {
Jim_SetResultFormatted(interp, "couldn't change working directory to \"%s\": %s", path, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
static __device__ int Jim_PwdCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
char *cwd = (char *)Jim_Alloc(MAXPATHLEN);
if (getcwd(cwd, MAXPATHLEN) == NULL) {
Jim_SetResultString(interp, "Failed to get pwd", -1);
Jim_Free(cwd);
return JIM_ERROR;
}
else if (ISWINDOWS) {
// Try to keep backslashes out of paths
char *p = cwd;
while ((p = (char *)strchr(p, '\\')) != NULL)
*p++ = '/';
}
Jim_SetResultString(interp, cwd, -1);
Jim_Free(cwd);
return JIM_OK;
}
__device__ int Jim_fileInit(Jim_Interp *interp)
{
if (Jim_PackageProvide(interp, "file", "1.0", JIM_ERRMSG))
return JIM_ERROR;
Jim_CreateCommand(interp, "file", Jim_SubCmdProc, (void *)_file_command_table, NULL);
Jim_CreateCommand(interp, "pwd", Jim_PwdCmd, NULL, NULL);
Jim_CreateCommand(interp, "cd", Jim_CdCmd, NULL, NULL);
return JIM_OK;
}
| 260a35891989bb5c6fe52a6bca94f361ed0438f7.cu | /*
* Implements the file command for jim
*
* (c) 2008 Steve Bennett <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE JIM TCL PROJECT ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* JIM TCL PROJECT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Jim Tcl Project.
*
* Based on code originally from Tcl 6.7:
*
* Copyright 1987-1991 Regents of the University of California
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies. The University of California
* makes no representations about the suitability of this
* software for any purpose. It is provided "as is" without
* express or implied warranty.
*/
#include <limits.h>
#include <stdlibcu.h>
#include <stringcu.h>
#include <stdiocu.h>
#include <errnocu.h>
#include <sys/statcu.h>
#include "jimautoconf.h"
#include "jim-subcmd.h"
#ifdef HAVE_UTIMES
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistdcu.h>
#elif defined(_MSC_VER)
#include <direct.h>
#define F_OK 0
#define W_OK 2
#define R_OK 4
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
#ifndef MAXPATHLEN
#define MAXPATHLEN JIM_PATH_LEN
#endif
#if defined(__MINGW32__) || defined(_MSC_VER)
#define ISWINDOWS 1
#define HAVE_MKDIR_ONE_ARG
#else
#define ISWINDOWS 0
#endif
// JimGetFileType --
// Given a mode word, returns a string identifying the type of a file.
// Results:
// A static text string giving the file type from mode.
// Side effects:
// None.
//
static __device__ const char *JimGetFileType(int mode)
{
if (S_ISREG(mode)) return "file";
else if (S_ISDIR(mode)) return "directory";
#ifdef S_ISCHR
else if (S_ISCHR(mode)) return "characterSpecial";
#endif
#ifdef S_ISBLK
else if (S_ISBLK(mode)) return "blockSpecial";
#endif
#ifdef S_ISFIFO
else if (S_ISFIFO(mode)) return "fifo";
#endif
#ifdef S_ISLNK
else if (S_ISLNK(mode)) return "link";
#endif
#ifdef S_ISSOCK
else if (S_ISSOCK(mode)) return "socket";
#endif
return "unknown";
}
// StoreStatData --
// This is a utility procedure that breaks out the fields of a "stat" structure and stores them in textual form into the
// elements of an associative array.
//
// Results:
// Returns a standard Tcl return value. If an error occurs then a message is left in interp->result.
//
// Side effects:
// Elements of the associative array given by "varName" are modified.
static __device__ void AppendStatElement(Jim_Interp *interp, Jim_Obj *listObj, const char *key, jim_wide value)
{
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, key, -1));
Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, value));
}
static __device__ int StoreStatData(Jim_Interp *interp, Jim_Obj *varName, const struct stat *sb)
{
// Just use a list to store the data
Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0);
#if !__CUDACC__
AppendStatElement(interp, listObj, "dev", sb->st_dev);
AppendStatElement(interp, listObj, "ino", sb->st_ino);
#endif
AppendStatElement(interp, listObj, "mode", sb->st_mode);
#if !__CUDACC__
AppendStatElement(interp, listObj, "nlink", sb->st_nlink);
#endif
AppendStatElement(interp, listObj, "uid", sb->st_uid);
AppendStatElement(interp, listObj, "gid", sb->st_gid);
AppendStatElement(interp, listObj, "size", sb->st_size);
AppendStatElement(interp, listObj, "atime", sb->st_atime);
AppendStatElement(interp, listObj, "mtime", sb->st_mtime);
AppendStatElement(interp, listObj, "ctime", sb->st_ctime);
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "type", -1));
Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, JimGetFileType((int)sb->st_mode), -1));
// Was a variable specified?
if (varName) {
Jim_Obj *objPtr = Jim_GetVariable(interp, varName, JIM_NONE);
if (objPtr) {
if (Jim_DictSize(interp, objPtr) < 0) {
// This message matches the one from Tcl
Jim_SetResultFormatted(interp, "can't set \"%#s(dev)\": variable isn't array", varName);
Jim_FreeNewObj(interp, listObj);
return JIM_ERROR;
}
if (Jim_IsShared(objPtr))
objPtr = Jim_DuplicateObj(interp, objPtr);
// Just cheat here and append as a list and convert to a dict
Jim_ListAppendList(interp, objPtr, listObj);
Jim_DictSize(interp, objPtr);
Jim_InvalidateStringRep(objPtr);
Jim_FreeNewObj(interp, listObj);
listObj = objPtr;
}
Jim_SetVariable(interp, varName, listObj);
}
// And also return the value
Jim_SetResult(interp, listObj);
return JIM_OK;
}
static __device__ int file_cmd_dirname(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *p = strrchr((char *)path, '/');
if (!p && path[0] == '.' && path[1] == '.' && path[2] == '\0') Jim_SetResultString(interp, "..", -1);
else if (!p) Jim_SetResultString(interp, ".", -1);
else if (p == path) Jim_SetResultString(interp, "/", -1);
else if (ISWINDOWS && p[-1] == ':') Jim_SetResultString(interp, path, (int)(p - path) + 1); // z:/dir => z:/
else Jim_SetResultString(interp, path, (int)(p - path));
return JIM_OK;
}
static __device__ int file_cmd_rootname(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
const char *p = strrchr((char *)path, '.');
if (p == NULL || (lastSlash != NULL && lastSlash > p))
Jim_SetResult(interp, argv[0]);
else
Jim_SetResultString(interp, path, (int)(p - path));
return JIM_OK;
}
static __device__ int file_cmd_extension(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
const char *p = strrchr((char *)path, '.');
if (p == NULL || (lastSlash != NULL && lastSlash >= p))
p = "";
Jim_SetResultString(interp, p, -1);
return JIM_OK;
}
static __device__ int file_cmd_tail(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
const char *lastSlash = strrchr((char *)path, '/');
if (lastSlash)
Jim_SetResultString(interp, lastSlash + 1, -1);
else
Jim_SetResult(interp, argv[0]);
return JIM_OK;
}
static __device__ int file_cmd_normalize(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#ifdef HAVE_REALPATH
const char *path = Jim_String(argv[0]);
char *newname = Jim_Alloc(MAXPATHLEN + 1);
if (realpath(path, newname)) {
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, -1));
return JIM_OK;
}
else {
Jim_Free(newname);
Jim_SetResultFormatted(interp, "can't normalize \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
#else
Jim_SetResultString(interp, "Not implemented", -1);
return JIM_ERROR;
#endif
}
static __device__ int file_cmd_join(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
char *newname = (char *)Jim_Alloc(MAXPATHLEN + 1);
char *last = newname;
*newname = 0;
// Simple implementation for now
for (int i = 0; i < argc; i++) {
int len;
const char *part = Jim_GetString(argv[i], &len);
// Absolute component, so go back to the start
if (*part == '/')
last = newname;
// Absolute component on mingw, so go back to the start
else if (ISWINDOWS && strchr(part, ':'))
last = newname;
else if (part[0] == '.') {
if (part[1] == '/') { part += 2; len -= 2; }
// Adding '.' to an existing path does nothing
else if (part[1] == 0 && last != newname) continue;
}
// Add a slash if needed
if (last != newname && last[-1] != '/')
*last++ = '/';
if (len) {
if (last + len - newname >= MAXPATHLEN) {
Jim_Free(newname);
Jim_SetResultString(interp, "Path too long", -1);
return JIM_ERROR;
}
memcpy(last, part, len);
last += len;
}
// Remove a slash if needed
if (last > newname + 1 && last[-1] == '/')
if (!ISWINDOWS || !(last > newname + 2 && last[-2] == ':')) // but on Windows, leave the trailing slash on "c:/"
*--last = 0;
}
*last = 0;
// Probably need to handle some special cases ...
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, (int)(last - newname)));
return JIM_OK;
}
static __device__ int file_access(Jim_Interp *interp, Jim_Obj *filename, int mode)
{
Jim_SetResultBool(interp, access(Jim_String(filename), mode) != -1);
return JIM_OK;
}
static __device__ int file_cmd_readable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], R_OK);
}
static __device__ int file_cmd_writable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], W_OK);
}
static __device__ int file_cmd_executable(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
#ifdef X_OK
return file_access(interp, argv[0], X_OK);
#else
// If no X_OK, just assume true
Jim_SetResultBool(interp, 1);
return JIM_OK;
#endif
}
static __device__ int file_cmd_exists(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return file_access(interp, argv[0], F_OK);
}
static __device__ int file_cmd_delete(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int force = Jim_CompareStringImmediate(interp, argv[0], "-force");
if (force || Jim_CompareStringImmediate(interp, argv[0], "--")) {
argc--; // skip the leading -force / -- option
argv++;
}
while (argc--) {
const char *path = Jim_String(argv[0]);
if (unlink(path) == -1 && errno != ENOENT) {
if (rmdir(path) == -1)
if (!force || Jim_EvalPrefix(interp, "file delete force", 1, argv) != JIM_OK) { // Maybe try using the script helper
Jim_SetResultFormatted(interp, "couldn't delete file \"%s\": %s", path, strerror(errno));
return JIM_ERROR;
}
}
argv++;
}
return JIM_OK;
}
#if defined(HAVE_MKDIR_ONE_ARG) && !defined(__CUDA_ARCH__)
#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME)
#else
#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME, 0755)
#endif
// Create directory, creating all intermediate paths if necessary.
// Returns 0 if OK or -1 on failure (and sets errno)
// Note: The path may be modified.
static __device__ int mkdir_all(char *path)
{
int ok = 1;
// First time just try to make the dir
goto first;
while (ok--) {
// Must have failed the first time, so recursively make the parent and try again
{
char *slash = strrchr(path, '/');
if (slash && slash != path) {
*slash = 0;
if (mkdir_all(path) != 0)
return -1;
*slash = '/';
}
}
first:
if (MKDIR_DEFAULT(path) == 0)
return 0;
if (errno == ENOENT)
continue; // Create the parent and try again
// Maybe it already exists as a directory
if (errno == EEXIST) {
struct stat sb;
if (stat(path, &sb) == 0 && S_ISDIR(sb.st_mode))
return 0;
// Restore errno
errno = EEXIST;
}
// Failed
break;
}
return -1;
}
static __device__ int file_cmd_mkdir(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
while (argc--) {
char *path = Jim_StrDup(Jim_String(argv[0]));
int rc = mkdir_all(path);
Jim_Free(path);
if (rc != 0) {
Jim_SetResultFormatted(interp, "can't create directory \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
argv++;
}
return JIM_OK;
}
static __device__ int file_cmd_tempfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int fd = Jim_MakeTempFile(interp, (argc >= 1) ? Jim_String(argv[0]) : NULL);
if (fd < 0)
return JIM_ERROR;
close(fd);
return JIM_OK;
}
static __device__ int file_cmd_rename(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
int force = 0;
if (argc == 3) {
if (!Jim_CompareStringImmediate(interp, argv[0], "-force"))
return -1;
force++;
argv++;
argc--;
}
const char *source = Jim_String(argv[0]);
const char *dest = Jim_String(argv[1]);
if (!force && access(dest, F_OK) == 0) {
Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": target exists", argv[0], argv[1]);
return JIM_ERROR;
}
if (rename(source, dest) != 0) {
Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": %s", argv[0], argv[1], strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
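// Editor's note: without -force an existing destination is rejected up front via
// access(dest, F_OK), so "file rename a.txt b.txt" fails when b.txt exists, while
// "file rename -force a.txt b.txt" simply lets rename(2) replace it (paths here
// are hypothetical).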
#if defined(HAVE_LINK) && defined(HAVE_SYMLINK)
static const char * const _link_options[] = { "-hard", "-symbolic", NULL };
static __device__ int file_cmd_link(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
enum { OPT_HARD, OPT_SYMBOLIC, };
int option = OPT_HARD;
if (argc == 3) {
if (Jim_GetEnum(interp, argv[0], _link_options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) != JIM_OK)
return JIM_ERROR;
argv++;
argc--;
}
const char *dest = Jim_String(argv[0]);
const char *source = Jim_String(argv[1]);
int ret = (option == OPT_HARD ? link(source, dest) : symlink(source, dest));
if (ret != 0) {
Jim_SetResultFormatted(interp, "error linking \"%#s\" to \"%#s\": %s", argv[0], argv[1], strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
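// Editor's sketch of the option handling above (hypothetical paths; "-sym" works
// because Jim_GetEnum is called with JIM_ENUM_ABBREV):
//   file link      mylink target  -> link(target, mylink)     (default -hard)
//   file link -sym mylink target  -> symlink(target, mylink)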
#endif
static __device__ int file_stat(Jim_Interp *interp, Jim_Obj *filename, struct stat *sb)
{
const char *path = Jim_String(filename);
if (stat(path, sb) == -1) {
Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#ifdef HAVE_LSTAT
static __device__ int file_lstat(Jim_Interp *interp, Jim_Obj *filename, struct _stat *sb)
{
const char *path = Jim_String(filename);
if (_lstat(path, sb) == -1) {
Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
#else
#define file_lstat file_stat
#endif
static __device__ int file_cmd_atime(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_atime);
return JIM_OK;
}
static __device__ int file_cmd_mtime(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
if (argc == 2) {
#ifdef HAVE_UTIMES
jim_wide newtime;
struct timeval times[2];
if (Jim_GetWide(interp, argv[1], &newtime) != JIM_OK)
return JIM_ERROR;
times[1].tv_sec = times[0].tv_sec = newtime;
times[1].tv_usec = times[0].tv_usec = 0;
if (utimes(Jim_String(argv[0]), times) != 0) {
Jim_SetResultFormatted(interp, "can't set time on \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
#else
Jim_SetResultString(interp, "Not implemented", -1);
return JIM_ERROR;
#endif
}
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_mtime);
return JIM_OK;
}
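// Usage sketch (editor's illustration): with one argument the handler just reports
// st_mtime; with two it first sets both access and modification time via utimes()
// when HAVE_UTIMES is defined, then re-stats and returns the new value:
//   file mtime foo.txt              ;# -> e.g. 1700000000
//   file mtime foo.txt 1700000000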
static __device__ int file_cmd_copy(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
return Jim_EvalPrefix(interp, "file copy", argc, argv);
}
static __device__ int file_cmd_size(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultInt(interp, sb.st_size);
return JIM_OK;
}
static __device__ int file_cmd_isdirectory(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = S_ISDIR(sb.st_mode);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
static __device__ int file_cmd_isfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = S_ISREG(sb.st_mode);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
#ifdef HAVE_GETEUID
static __device__ int file_cmd_owned(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct _stat sb;
int ret = 0;
if (file_stat(interp, argv[0], &sb) == JIM_OK)
ret = (geteuid() == sb.st_uid);
Jim_SetResultInt(interp, ret);
return JIM_OK;
}
#endif
#if defined(HAVE_READLINK)
static __device__ int file_cmd_readlink(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
const char *path = Jim_String(argv[0]);
char *linkValue = Jim_Alloc(MAXPATHLEN + 1);
int linkLength = readlink(path, linkValue, MAXPATHLEN);
if (linkLength == -1) {
Jim_Free(linkValue);
Jim_SetResultFormatted(interp, "couldn't readlink \"%#s\": %s", argv[0], strerror(errno));
return JIM_ERROR;
}
linkValue[linkLength] = 0;
Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, linkValue, linkLength));
return JIM_OK;
}
#endif
static __device__ int file_cmd_type(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_lstat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
Jim_SetResultString(interp, JimGetFileType((int)sb.st_mode), -1);
return JIM_OK;
}
#ifdef HAVE_LSTAT
static __device__ int file_cmd_lstat(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct _stat sb;
if (file_lstat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
return StoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb);
}
#else
#define file_cmd_lstat file_cmd_stat
#endif
static __device__ int file_cmd_stat(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
struct stat sb;
if (file_stat(interp, argv[0], &sb) != JIM_OK)
return JIM_ERROR;
return StoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb);
}
__constant__ static const jim_subcmd_type _file_command_table[] = {
{ "atime", "name", file_cmd_atime, 1, 1 }, // Description: Last access time
{ "mtime", "name ?time?", file_cmd_mtime, 1, 2 }, // Description: Get or set last modification time
{ "copy", "?-force? source dest", file_cmd_copy, 2, 3 }, // Description: Copy source file to destination file
{ "dirname", "name", file_cmd_dirname, 1, 1, }, // Description: Directory part of the name
{ "rootname", "name", file_cmd_rootname, 1, 1 }, // Description: Name without any extension
{ "extension", "name", file_cmd_extension, 1, 1, }, // Description: Last extension including the dot
{ "tail", "name", file_cmd_tail, 1, 1 }, // Description: Last component of the name
{ "normalize", "name", file_cmd_normalize, 1, 1 }, // Description: Normalized path of name
{ "join", "name ?name ...?", file_cmd_join, 1, -1 }, // Description: Join multiple path components
{ "readable", "name", file_cmd_readable, 1, 1 }, // Description: Is file readable
{ "writable", "name", file_cmd_writable, 1, 1 }, // Description: Is file writable
{ "executable", "name", file_cmd_executable, 1, 1 }, // Description: Is file executable
{ "exists", "name", file_cmd_exists, 1, 1 }, // Description: Does file exist
{ "delete", "?-force|--? name ...", file_cmd_delete, 1, -1 }, // Description: Deletes the files or directories (must be empty unless -force)
{ "mkdir", "dir ...", file_cmd_mkdir, 1, -1 }, // Description: Creates the directories
{ "tempfile", "?template?", file_cmd_tempfile, 0, 1 }, // Description: Creates a temporary filename
{ "rename", "?-force? source dest", file_cmd_rename, 2, 3 }, // Description: Renames a file
#if defined(HAVE_LINK) && defined(HAVE_SYMLINK)
{ "link", "?-symbolic|-hard? newname target", file_cmd_link, 2, 3 }, // Description: Creates a hard or soft link
#endif
#if defined(HAVE_READLINK)
{ "readlink", "name", file_cmd_readlink, 1, 1 }, // Description: Value of the symbolic link
#endif
{ "size", "name", file_cmd_size, 1, 1 }, // Description: Size of file
{ "stat", "name ?var?", file_cmd_stat, 1, 2 }, // Description: Returns results of stat, and may store in var array
{ "lstat", "name ?var?", file_cmd_lstat, 1, 2 }, // Description: Returns results of lstat, and may store in var array
{ "type", "name", file_cmd_type, 1, 1 }, // Description: Returns type of the file
#ifdef HAVE_GETEUID
{ "owned", "name", file_cmd_owned, 1, 1 }, // Description: Returns 1 if owned by the current owner
#endif
{ "isdirectory", "name", file_cmd_isdirectory, 1, 1 }, // Description: Returns 1 if name is a directory
{ "isfile", "name", file_cmd_isfile, 1, 1 }, // Description: Returns 1 if name is a file
{ NULL }
};
static __device__ int Jim_CdCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
if (argc != 2) {
Jim_WrongNumArgs(interp, 1, argv, "dirname");
return JIM_ERROR;
}
const char *path = Jim_String(argv[1]);
if (chdir(path) != 0) {
Jim_SetResultFormatted(interp, "couldn't change working directory to \"%s\": %s", path, strerror(errno));
return JIM_ERROR;
}
return JIM_OK;
}
static __device__ int Jim_PwdCmd(ClientData dummy, Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
char *cwd = (char *)Jim_Alloc(MAXPATHLEN);
if (getcwd(cwd, MAXPATHLEN) == NULL) {
Jim_SetResultString(interp, "Failed to get pwd", -1);
Jim_Free(cwd);
return JIM_ERROR;
}
else if (ISWINDOWS) {
// Try to keep backslashes out of paths
char *p = cwd;
while ((p = (char *)strchr(p, '\\')) != NULL)
*p++ = '/';
}
Jim_SetResultString(interp, cwd, -1);
Jim_Free(cwd);
return JIM_OK;
}
__device__ int Jim_fileInit(Jim_Interp *interp)
{
if (Jim_PackageProvide(interp, "file", "1.0", JIM_ERRMSG))
return JIM_ERROR;
Jim_CreateCommand(interp, "file", Jim_SubCmdProc, (void *)_file_command_table, NULL);
Jim_CreateCommand(interp, "pwd", Jim_PwdCmd, NULL, NULL);
Jim_CreateCommand(interp, "cd", Jim_CdCmd, NULL, NULL);
return JIM_OK;
}
|
6facead1c98aaf42af9a676f39aab5db14ecb082.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
namespace cv { namespace cuda { namespace device
{
namespace fast
{
__device__ unsigned int g_counter = 0;
///////////////////////////////////////////////////////////////////////////
// calcKeypoints
__constant__ uchar c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
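// Build two 16-bit masks over the 16-pixel Bresenham circle around the candidate:
// bit k of mask1 is set when circle pixel k is darker than v - th, bit k of mask2
// when it is brighter than v + th. Diametrically opposite pairs are classified first;
// if both members of a pair fall inside the threshold band, no contiguous arc of 9
// pixels can exist, so the function returns early with partially filled masks
// (which isKeyPoint then rejects).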
__device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
// mask1 marks circle pixels darker than v - th, mask2 those brighter than v + th.
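// A corner requires at least 9 of the 16 circle pixels to be consistently darker or
// consistently brighter, and those pixels must form a contiguous arc. c_table is a
// precomputed bit table over all 16-bit masks encoding the contiguity test; the
// __popc pre-check also keeps the index (mask >> 3) - 63 non-negative.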
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
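// Corner response used for non-maximum suppression: the largest threshold at which
// the pixel would still be detected, found by binary search over [threshold + 1, 255].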
__device__ int cornerScore(const uint C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
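// Branch-free bound update: raise the lower bound when mid still passes,
// otherwise lower the upper bound.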
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
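// One thread per interior pixel. The 16 circle neighbours are packed four bytes per
// word into C[0..3]; the upper half of the circle and the two horizontal pixels are
// loaded first so the quick rejection test can run before the lower half is fetched.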
template <bool calcScore, class Mask>
__global__ void calcKeypoints(const PtrStepSzb img, const Mask mask, short2* kpLoc, const unsigned int maxKeypoints, PtrStepi score, const int threshold)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = threadIdx.y + blockIdx.y * blockDim.y + 3;
if (i < img.rows - 3 && j < img.cols - 3 && mask(i, j))
{
int v;
uint C[4] = {0,0,0,0};
C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint>(img(i - 3, j));
C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint>(img(i, j + 3));
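// High-speed test: if both pixels on the horizontal diameter lie inside the
// threshold band, a contiguous arc of 9 circle pixels is impossible, so bail out
// before loading the lower half of the circle.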
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0)
return;
C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint>(img(i + 3, j));
C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2))
{
if (calcScore) score(i, j) = cornerScore(C, v, threshold);
const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
if (ind < maxKeypoints)
kpLoc[ind] = make_short2(j, i);
}
}
#endif
}
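// Host wrapper: resets the device counter, picks the kernel specialization matching
// whether a score image and/or a mask were supplied, and returns the final counter
// value (which can exceed maxKeypoints even though at most maxKeypoints locations
// are stored).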
int calcKeypoints_gpu(PtrStepSzb img, PtrStepSzb mask, short2* kpLoc, int maxKeypoints, PtrStepSzi score, int threshold)
{
void* counter_ptr;
cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) );
dim3 block(32, 8);
dim3 grid;
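// Only the interior is processed; the 3-pixel border on each side accounts for the -6.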
grid.x = divUp(img.cols - 6, block.x);
grid.y = divUp(img.rows - 6, block.y);
cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(unsigned int)) );
if (score.data)
{
if (mask.data)
hipLaunchKernelGGL(( calcKeypoints<true>), dim3(grid), dim3(block), 0, 0, img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold);
else
hipLaunchKernelGGL(( calcKeypoints<true>), dim3(grid), dim3(block), 0, 0, img, WithOutMask(), kpLoc, maxKeypoints, score, threshold);
}
else
{
if (mask.data)
hipLaunchKernelGGL(( calcKeypoints<false>), dim3(grid), dim3(block), 0, 0, img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold);
else
hipLaunchKernelGGL(( calcKeypoints<false>), dim3(grid), dim3(block), 0, 0, img, WithOutMask(), kpLoc, maxKeypoints, score, threshold);
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
unsigned int count;
cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost) );
return count;
}
///////////////////////////////////////////////////////////////////////////
// nonmaxSuppression
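// One thread per detected keypoint: keep it only if its score is strictly greater
// than that of all eight neighbours in the score image, compacting survivors with
// an atomic counter.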
__global__ void nonmaxSuppression(const short2* kpLoc, int count, const PtrStepSzi scoreMat, short2* locFinal, float* responseFinal)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (kpIdx < count)
{
short2 loc = kpLoc[kpIdx];
int score = scoreMat(loc.y, loc.x);
bool ismax =
score > scoreMat(loc.y - 1, loc.x - 1) &&
score > scoreMat(loc.y - 1, loc.x ) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y , loc.x - 1) &&
score > scoreMat(loc.y , loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) &&
score > scoreMat(loc.y + 1, loc.x ) &&
score > scoreMat(loc.y + 1, loc.x + 1);
if (ismax)
{
const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
locFinal[ind] = loc;
responseFinal[ind] = static_cast<float>(score);
}
}
#endif
}
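// Host wrapper: resets the counter, suppresses non-maxima over the candidate list
// and returns how many keypoints survive.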
int nonmaxSuppression_gpu(const short2* kpLoc, int count, PtrStepSzi score, short2* loc, float* response)
{
void* counter_ptr;
cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) );
dim3 block(256);
dim3 grid;
grid.x = divUp(count, block.x);
cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( nonmaxSuppression), dim3(grid), dim3(block), 0, 0, kpLoc, count, score, loc, response);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
unsigned int new_count;
cudaSafeCall( hipMemcpy(&new_count, counter_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost) );
return new_count;
}
} // namespace fast
}}}
#endif /* CUDA_DISABLER */
| 6facead1c98aaf42af9a676f39aab5db14ecb082.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
namespace cv { namespace cuda { namespace device
{
namespace fast
{
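// Device-global keypoint counter shared by the detection and suppression kernels;
// reset from the host before each launch and read back afterwards to size the output.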
__device__ unsigned int g_counter = 0;
///////////////////////////////////////////////////////////////////////////
// calcKeypoints
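// Lookup table indexed by (mask >> 3) - 63 with bit (mask & 7): records which
// 16-bit circle masks contain a contiguous run of at least 9 set bits.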
__constant__ uchar c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
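// A quick worked example of the encoding above, assuming th = 10:
//   diffType(v = 120, x = 100, th = 10): diff = -20 < -10 -> returns 1 (v > x + th)
//   diffType(v =  85, x = 100, th = 10): diff =  15 >  10 -> returns 2 (v < x - th)
//   diffType(v = 105, x = 100, th = 10): diff =  -5       -> returns 0 (within +/- th)
// The two comparisons are combined branchlessly, so the result is always 0, 1 or 2.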
__device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
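// calcMask builds two 16-bit masks over the 16-pixel Bresenham circle around the
// candidate: bit k of mask1 is set when the center exceeds circle pixel k by more than
// th (diffType code 1), bit k of mask2 when it falls below it by more than th (code 2).
// The circle pixels arrive packed four per uint in C[0..3], and the bits are filled in
// an interleaved order so that each early "return" fires when a diametrically opposite
// pixel pair is entirely within the threshold; in that case at most 7 contiguous circle
// pixels could pass, so the point cannot satisfy the 9-of-16 segment test anyway.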
// isKeyPoint applies the FAST segment test to the two masks built by calcMask: the pixel
// is a keypoint when either mask has more than 8 bits set and the precomputed c_table
// lookup confirms that those bits form a long enough contiguous arc around the circle.
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
__device__ int cornerScore(const uint C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
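// cornerScore finds the largest threshold for which the pixel still passes the segment
// test. The search interval starts at [threshold + 1, 255]; min and max are updated with
// a branchless select (isKp is 0 or 1), avoiding divergent branches inside the warp.
// On exit min is one past the last passing threshold, hence the "min - 1".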
template <bool calcScore, class Mask>
__global__ void calcKeypoints(const PtrStepSzb img, const Mask mask, short2* kpLoc, const unsigned int maxKeypoints, PtrStepi score, const int threshold)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = threadIdx.y + blockIdx.y * blockDim.y + 3;
if (i < img.rows - 3 && j < img.cols - 3 && mask(i, j))
{
int v;
uint C[4] = {0,0,0,0};
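// The 16 pixels on the radius-3 Bresenham circle are packed four per uint into C[0..3].
// They are loaded top-to-bottom, so the horizontal extremes img(i, j - 3) and
// img(i, j + 3) are available before the lower half of the circle is read: if neither
// differs from the center by more than the threshold, the candidate is rejected below
// without touching the remaining rows.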
C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint>(img(i - 3, j));
C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0)
return;
C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint>(img(i + 3, j));
C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2))
{
if (calcScore) score(i, j) = cornerScore(C, v, threshold);
const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
if (ind < maxKeypoints)
kpLoc[ind] = make_short2(j, i);
}
}
#endif
}
int calcKeypoints_gpu(PtrStepSzb img, PtrStepSzb mask, short2* kpLoc, int maxKeypoints, PtrStepSzi score, int threshold)
{
void* counter_ptr;
cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
dim3 block(32, 8);
dim3 grid;
grid.x = divUp(img.cols - 6, block.x);
grid.y = divUp(img.rows - 6, block.y);
cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) );
if (score.data)
{
if (mask.data)
calcKeypoints<true><<<grid, block>>>(img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold);
else
calcKeypoints<true><<<grid, block>>>(img, WithOutMask(), kpLoc, maxKeypoints, score, threshold);
}
else
{
if (mask.data)
calcKeypoints<false><<<grid, block>>>(img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold);
else
calcKeypoints<false><<<grid, block>>>(img, WithOutMask(), kpLoc, maxKeypoints, score, threshold);
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
unsigned int count;
cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
return count;
}
///////////////////////////////////////////////////////////////////////////
// nonmaxSuppression
__global__ void nonmaxSuppression(const short2* kpLoc, int count, const PtrStepSzi scoreMat, short2* locFinal, float* responseFinal)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110)
const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x;
if (kpIdx < count)
{
short2 loc = kpLoc[kpIdx];
int score = scoreMat(loc.y, loc.x);
bool ismax =
score > scoreMat(loc.y - 1, loc.x - 1) &&
score > scoreMat(loc.y - 1, loc.x ) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y , loc.x - 1) &&
score > scoreMat(loc.y , loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) &&
score > scoreMat(loc.y + 1, loc.x ) &&
score > scoreMat(loc.y + 1, loc.x + 1);
if (ismax)
{
const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1));
locFinal[ind] = loc;
responseFinal[ind] = static_cast<float>(score);
}
}
#endif
}
int nonmaxSuppression_gpu(const short2* kpLoc, int count, PtrStepSzi score, short2* loc, float* response)
{
void* counter_ptr;
cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) );
dim3 block(256);
dim3 grid;
grid.x = divUp(count, block.x);
cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) );
nonmaxSuppression<<<grid, block>>>(kpLoc, count, score, loc, response);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
unsigned int new_count;
cudaSafeCall( cudaMemcpy(&new_count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) );
return new_count;
}
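// A minimal host-side sketch of how the two stages are typically chained together
// (kpLoc_dev, score_mat, loc_dev and resp_dev are hypothetical device buffers the caller
// is assumed to have allocated; error handling is omitted). Note that score_mat must be
// a valid score image, since calcKeypoints_gpu only fills it when score.data is non-null
// and nonmaxSuppression_gpu reads it back:
//
//   int raw  = calcKeypoints_gpu(img, mask, kpLoc_dev, max_kp, score_mat, 20 /*threshold*/);
//   raw      = ::min(raw, (int)max_kp); // the returned counter can overshoot maxKeypoints
//   int kept = nonmaxSuppression_gpu(kpLoc_dev, raw, score_mat, loc_dev, resp_dev);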
} // namespace fast
}}}
#endif /* CUDA_DISABLER */
|
67178768f7a491f4e08a4a92d13e3b2d90e739a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S3_13.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
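// The pitched allocation above lays the state out as an NEQ x num_volumes matrix: row i
// holds state variable i for every cell, padded to 'pitch' bytes, so variable i of cell
// 'id' lives at *((real *)((char *)sv + pitch * i) + id). The pitch is copied to the
// device-side 'pitch' symbol (presumably declared in the model header) so the kernels
// below can use the same addressing.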
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
// The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6249845555192,0.00127452925982209,0.781098854878912,0.780945505139612,0.000173127258213963,0.485680542843999,0.00292844596868805,0.999998366997264,1.91530092199862e-08,1.87681747950326e-05,0.999774940058991,1.00702552778216,0.999994275830871,4.68103990785171e-05,0.397558769683448,10.1166549211387,139.567494437918};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
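// Each thread integrates one cell: sv_id selects the column of the pitched state matrix
// (directly, or through cells_to_solve when an adaptive mesh is used), mapping[sv_id]
// selects the myocardium or epicardium right-hand side, and the num_steps inner loop
// applies the per-step update sv[i] += dt * rDY[i] for all NEQ state variables.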
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
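// Ek, Ena and Eca below are Nernst reversal potentials (RTONF = RT/F, in mV, with the
// factor 0.5 for the divalent Ca2+); Eks uses the mixed Na+/K+ permeability pKNa of the
// slow delayed-rectifier current.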
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
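// The gating variables use the Rush-Larsen update: each expression below evaluates
// y_inf - (y_inf - y_old) * exp(-dt / tau_y), i.e. the closed-form solution of the linear
// gate ODE over one step, which remains stable for the stiff fast gates.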
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.2937045632517,0.000331567271096668,0.000125123240971326,0.000319780240937142,0.230930142679641,0.142554278260413,0.156333434028122,4.66122435867929,0.0134411682726080,1.80597248717533,1099.57883152268,0.000468845160350493,0.251300465852520,0.0155860481845978,0.00288945677711972,6.05964606931935e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
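// The parameters[] block above overrides the default conductances and pump/uptake
// constants declared earlier with a cell-specific set (presumably the fitted values this
// "S3_13" model variant was generated with); arel, crel and Vleak replace the hard-coded
// calcium release and leak constants used in the myocardium version of this function.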
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 67178768f7a491f4e08a4a92d13e3b2d90e739a3.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S3_13.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// The cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6249845555192,0.00127452925982209,0.781098854878912,0.780945505139612,0.000173127258213963,0.485680542843999,0.00292844596868805,0.999998366997264,1.91530092199862e-08,1.87681747950326e-05,0.999774940058991,1.00702552778216,0.999994275830871,4.68103990785171e-05,0.397558769683448,10.1166549211387,139.567494437918};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
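// Epicardial variant of the same model: it starts from the epicardium conductances (Gks = 0.245)
// and then overrides most of them with the fitted values in `parameters[]`, which also provide the
// SR release (arel, crel) and leak (Vleak) coefficients used in the calcium update below.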
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.2937045632517,0.000331567271096668,0.000125123240971326,0.000319780240937142,0.230930142679641,0.142554278260413,0.156333434028122,4.66122435867929,0.0134411682726080,1.80597248717533,1099.57883152268,0.000468845160350493,0.251300465852520,0.0155860481845978,0.00288945677711972,6.05964606931935e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
f6705aade6d403f1bcab04799a974ea490e4f0cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "rgb2grayKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
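// Auto-generated benchmark harness: for every image size in matrices_ and every block shape in
// blocks_, the dimensions are rounded up to a multiple of the block, the kernel is warmed up with
// 10 launches, and 1000 timed launches are reported in microseconds together with the configuration.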
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *imgr = NULL;
hipMalloc(&imgr, XSIZE*YSIZE);
unsigned char *imgg = NULL;
hipMalloc(&imgg, XSIZE*YSIZE);
unsigned char *imgb = NULL;
hipMalloc(&imgb, XSIZE*YSIZE);
unsigned char *img_gray = NULL;
hipMalloc(&img_gray, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((rgb2grayKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, imgr, imgg, imgb, img_gray, n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((rgb2grayKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, imgr, imgg, imgb, img_gray, n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((rgb2grayKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, imgr, imgg, imgb, img_gray, n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f6705aade6d403f1bcab04799a974ea490e4f0cf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rgb2grayKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *imgr = NULL;
cudaMalloc(&imgr, XSIZE*YSIZE);
unsigned char *imgg = NULL;
cudaMalloc(&imgg, XSIZE*YSIZE);
unsigned char *imgb = NULL;
cudaMalloc(&imgb, XSIZE*YSIZE);
unsigned char *img_gray = NULL;
cudaMalloc(&img_gray, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
rgb2grayKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,img_gray,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
rgb2grayKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,img_gray,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
rgb2grayKernel<<<gridBlock,threadBlock>>>(imgr,imgg,imgb,img_gray,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
617dddc58b7174797761b4c588254d4eed89ba72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "K_Means.h"
__device__ double calculate_distance_between_positions_gpu(Position* p1, Position* p2)
{
double euclid_distance, x, y, z;
x = p1->x - p2->x;
y = p1->y - p2->y;
z = p1->z - p2->z;
euclid_distance = sqrt(x*x + y*y + z*z);
return euclid_distance;
}
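// One thread per point: find the nearest cluster center by Euclidean distance and, when a point
// changes cluster, raise the device-side flag so the host knows another k-means iteration is needed.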
__global__ void assign_points_to_clusters_gpu(Point* dev_points, int num_of_points, Cluster* dev_clusters, int num_of_clusters, int* dev_point_moved_flag)
{
double current_distance, min_distance;
int min_cluster_id, index, i;
index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < num_of_points)
{
min_distance = calculate_distance_between_positions_gpu(&(dev_points[index].position), &(dev_clusters[0].center));
min_cluster_id = dev_clusters[0].id;
for (i = 1; i < num_of_clusters; i++)
{
current_distance = calculate_distance_between_positions_gpu(&(dev_points[index].position), &(dev_clusters[i].center));
if (current_distance < min_distance)
{
min_distance = current_distance;
min_cluster_id = dev_clusters[i].id;
}
}
//point moved to another cluster
if (dev_points[index].cluster_id != min_cluster_id)
{
dev_points[index].cluster_id = min_cluster_id;
*dev_point_moved_flag = 1;
}
}
}
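// Advances each point to its position at the given simulation time: position = initial + time * velocity.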
__global__ void update_points_position_gpu(Point* dev_points, int num_of_points, double time)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < num_of_points)
{
dev_points[index].position.x = dev_points[index].intial_position.x + time*dev_points[index].velocity.vx;
dev_points[index].position.y = dev_points[index].intial_position.y + time*dev_points[index].velocity.vy;
dev_points[index].position.z = dev_points[index].intial_position.z + time*dev_points[index].velocity.vz;
}
}
void init_cuda(Point** dev_points, int num_of_points, Cluster** dev_clusters, int num_of_clusters, int** dev_point_moved_flag)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(DEVICE_ID);
handle_errors(cudaStatus, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n", 0);
//allocates memory for each process on the GPU
*dev_points = allocate_points_to_device(num_of_points);
*dev_clusters = allocate_clusters_to_device(num_of_clusters);
*dev_point_moved_flag = allocate_point_moved_flag_to_device();
}
int* allocate_point_moved_flag_to_device()
{
hipError_t cudaStatus;
int* dev_point_moved_flag = 0;
// Allocate GPU buffer for int
cudaStatus = hipMalloc((void**)&dev_point_moved_flag, sizeof(int));
handle_errors(cudaStatus, "hipMalloc failed!\n", 1, dev_point_moved_flag);
return dev_point_moved_flag;
}
Cluster* allocate_clusters_to_device(int num_of_clusters)
{
hipError_t cudaStatus;
Cluster* dev_clusters = 0;
// Allocate GPU buffer for points
cudaStatus = hipMalloc((void**)&dev_clusters, num_of_clusters * sizeof(Cluster));
handle_errors(cudaStatus, "hipMalloc failed!\n", 1, dev_clusters);
return dev_clusters;
}
Point* allocate_points_to_device(int num_of_points)
{
hipError_t cudaStatus;
Point* dev_points = NULL;
// Allocate GPU buffer for points
cudaStatus = hipMalloc((void**)&dev_points, num_of_points * sizeof(Point));
handle_errors(cudaStatus, "hipMalloc failed!\n", 1, dev_points);
return dev_points;
}
void update_points_position_by_time(Point* dev_points, Point* points, int num_of_points, double time)
{
int threads_per_block, num_of_blocks;
hipError_t cudaStatus;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, DEVICE_ID);
threads_per_block = prop.maxThreadsPerBlock;
num_of_blocks = (num_of_points + threads_per_block - 1) / threads_per_block;
// Copy input points array from host memory to GPU buffer
cudaStatus = hipMemcpy(dev_points, points, num_of_points * sizeof(Point), hipMemcpyHostToDevice);
handle_errors(cudaStatus, "hipMemcpy host to device failed!\n", 1, dev_points);
// Launch a kernel on the GPU with blocks & threads
update_points_position_gpu << <num_of_blocks, threads_per_block >> >(dev_points, num_of_points, time);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
handle_errors(cudaStatus, hipGetErrorString(cudaStatus), 1, dev_points);
// hipDeviceSynchronize waits for the kernel to finish
cudaStatus = hipDeviceSynchronize();
handle_errors(cudaStatus, "hipDeviceSynchronize returned error code after launching Kernel - update_points_position!\n", 1, dev_points);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(points, dev_points, num_of_points * sizeof(Point), hipMemcpyDeviceToHost);
handle_errors(cudaStatus, "hipMemcpy device to host failed!\n", 1, dev_points);
}
void assign_points_to_clusters_gpu(Point* points, Point* dev_points, int num_of_points, Cluster* clusters, Cluster* dev_clusters, int num_of_clusters, int* point_moved_flag, int* dev_point_moved_flag)
{
int threads_per_block, num_of_blocks;
hipError_t cudaStatus;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, DEVICE_ID);
threads_per_block = prop.maxThreadsPerBlock;
num_of_blocks = (num_of_points + threads_per_block - 1) / threads_per_block;
// Copy input points array from host memory to GPU buffer
cudaStatus = hipMemcpy(dev_points, points, num_of_points * sizeof(Point), hipMemcpyHostToDevice);
handle_errors(cudaStatus, "hipMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy input points array from host memory to GPU buffer
cudaStatus = hipMemcpy(dev_clusters, clusters, num_of_clusters * sizeof(Cluster), hipMemcpyHostToDevice);
handle_errors(cudaStatus, "hipMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy input points array from host memory to GPU buffer
cudaStatus = hipMemcpy(dev_point_moved_flag, point_moved_flag, sizeof(int), hipMemcpyHostToDevice);
handle_errors(cudaStatus, "hipMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Launch a kernel on the GPU with blocks & threads
assign_points_to_clusters_gpu << <num_of_blocks, threads_per_block >> >(dev_points, num_of_points, dev_clusters, num_of_clusters, dev_point_moved_flag);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
handle_errors(cudaStatus, hipGetErrorString(cudaStatus), 3, dev_points, dev_clusters, dev_point_moved_flag);
// hipDeviceSynchronize waits for the kernel to finish
cudaStatus = hipDeviceSynchronize();
handle_errors(cudaStatus, "hipDeviceSynchronize returned error code after launching Kernel - assign_points_to_clusters!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(points, dev_points, num_of_points * sizeof(Point), hipMemcpyDeviceToHost);
handle_errors(cudaStatus, "hipMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(clusters, dev_clusters, num_of_clusters * sizeof(Cluster), hipMemcpyDeviceToHost);
handle_errors(cudaStatus, "hipMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(point_moved_flag, dev_point_moved_flag, sizeof(int), hipMemcpyDeviceToHost);
handle_errors(cudaStatus, "hipMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
}
void end_cuda(Point* dev_my_points, Cluster* dev_clusters, int* dev_point_moved_flag)
{
hipError_t cudaStatus;
//free all cuda allocations
free_cuda_allocations(3, dev_my_points, dev_clusters, dev_point_moved_flag);
//reset device
cudaStatus = hipDeviceReset();
handle_errors(cudaStatus, "hipDeviceReset failed!", 0);
}
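// Variadic error handler: on any CUDA failure it frees the device allocations passed after `count`,
// prints the error message and aborts the whole MPI job.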
void handle_errors(hipError_t cudaStatus, const char* error, int count, ...)
{
//The function receives the CUDA status and the device allocations made so far.
//If the status reports an error, the allocations are freed, the message is printed and the MPI job is aborted.
va_list allocs;
va_start(allocs, count);
int i;
if (cudaStatus != hipSuccess)
{
//free cuda allocations
for (i = 0; i < count; i++)
hipFree(va_arg(allocs, void*));
va_end(allocs);
//print the error and abort
printf("Cuda Error: %s", error);
fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, 0);
}
}
void free_cuda_allocations(int count, ...)
{
//The function receives the number of allocations followed by their pointers.
va_list allocs;
int i;
va_start(allocs, count);
//free each allocation
for (i = 0; i < count; i++)
{
hipFree(va_arg(allocs, void*));
}
va_end(allocs);
}
| 617dddc58b7174797761b4c588254d4eed89ba72.cu | #include "K_Means.h"
__device__ double calculate_distance_between_positions_gpu(Position* p1, Position* p2)
{
double euclid_distance, x, y, z;
x = p1->x - p2->x;
y = p1->y - p2->y;
z = p1->z - p2->z;
euclid_distance = sqrt(x*x + y*y + z*z);
return euclid_distance;
}
__global__ void assign_points_to_clusters_gpu(Point* dev_points, int num_of_points, Cluster* dev_clusters, int num_of_clusters, int* dev_point_moved_flag)
{
double current_distance, min_distance;
int min_cluster_id, index, i;
index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < num_of_points)
{
min_distance = calculate_distance_between_positions_gpu(&(dev_points[index].position), &(dev_clusters[0].center));
min_cluster_id = dev_clusters[0].id;
for (i = 1; i < num_of_clusters; i++)
{
current_distance = calculate_distance_between_positions_gpu(&(dev_points[index].position), &(dev_clusters[i].center));
if (current_distance < min_distance)
{
min_distance = current_distance;
min_cluster_id = dev_clusters[i].id;
}
}
//point moved to another cluster
if (dev_points[index].cluster_id != min_cluster_id)
{
dev_points[index].cluster_id = min_cluster_id;
*dev_point_moved_flag = 1;
}
}
}
__global__ void update_points_position_gpu(Point* dev_points, int num_of_points, double time)
{
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < num_of_points)
{
dev_points[index].position.x = dev_points[index].intial_position.x + time*dev_points[index].velocity.vx;
dev_points[index].position.y = dev_points[index].intial_position.y + time*dev_points[index].velocity.vy;
dev_points[index].position.z = dev_points[index].intial_position.z + time*dev_points[index].velocity.vz;
}
}
void init_cuda(Point** dev_points, int num_of_points, Cluster** dev_clusters, int num_of_clusters, int** dev_point_moved_flag)
{
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(DEVICE_ID);
handle_errors(cudaStatus, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n", 0);
//allocates memory for each process on the GPU
*dev_points = allocate_points_to_device(num_of_points);
*dev_clusters = allocate_clusters_to_device(num_of_clusters);
*dev_point_moved_flag = allocate_point_moved_flag_to_device();
}
int* allocate_point_moved_flag_to_device()
{
cudaError_t cudaStatus;
int* dev_point_moved_flag = 0;
// Allocate GPU buffer for int
cudaStatus = cudaMalloc((void**)&dev_point_moved_flag, sizeof(int));
handle_errors(cudaStatus, "cudaMalloc failed!\n", 1, dev_point_moved_flag);
return dev_point_moved_flag;
}
Cluster* allocate_clusters_to_device(int num_of_clusters)
{
cudaError_t cudaStatus;
Cluster* dev_clusters = 0;
// Allocate GPU buffer for points
cudaStatus = cudaMalloc((void**)&dev_clusters, num_of_clusters * sizeof(Cluster));
handle_errors(cudaStatus, "cudaMalloc failed!\n", 1, dev_clusters);
return dev_clusters;
}
Point* allocate_points_to_device(int num_of_points)
{
cudaError_t cudaStatus;
Point* dev_points = NULL;
// Allocate GPU buffer for points
cudaStatus = cudaMalloc((void**)&dev_points, num_of_points * sizeof(Point));
handle_errors(cudaStatus, "cudaMalloc failed!\n", 1, dev_points);
return dev_points;
}
void update_points_position_by_time(Point* dev_points, Point* points, int num_of_points, double time)
{
int threads_per_block, num_of_blocks;
cudaError_t cudaStatus;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, DEVICE_ID);
threads_per_block = prop.maxThreadsPerBlock;
num_of_blocks = (num_of_points + threads_per_block - 1) / threads_per_block;
// Copy input points array from host memory to GPU buffer
cudaStatus = cudaMemcpy(dev_points, points, num_of_points * sizeof(Point), cudaMemcpyHostToDevice);
handle_errors(cudaStatus, "cudaMemcpy host to device failed!\n", 1, dev_points);
// Launch a kernel on the GPU with blocks & threads
update_points_position_gpu << <num_of_blocks, threads_per_block >> >(dev_points, num_of_points, time);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
handle_errors(cudaStatus, cudaGetErrorString(cudaStatus), 1, dev_points);
// cudaDeviceSynchronize waits for the kernel to finish
cudaStatus = cudaDeviceSynchronize();
handle_errors(cudaStatus, "cudaDeviceSynchronize returned error code after launching Kernel - update_points_position!\n", 1, dev_points);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(points, dev_points, num_of_points * sizeof(Point), cudaMemcpyDeviceToHost);
handle_errors(cudaStatus, "cudaMemcpy device to host failed!\n", 1, dev_points);
}
void assign_points_to_clusters_gpu(Point* points, Point* dev_points, int num_of_points, Cluster* clusters, Cluster* dev_clusters, int num_of_clusters, int* point_moved_flag, int* dev_point_moved_flag)
{
int threads_per_block, num_of_blocks;
cudaError_t cudaStatus;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, DEVICE_ID);
threads_per_block = prop.maxThreadsPerBlock;
num_of_blocks = (num_of_points + threads_per_block - 1) / threads_per_block;
// Copy input points array from host memory to GPU buffer
cudaStatus = cudaMemcpy(dev_points, points, num_of_points * sizeof(Point), cudaMemcpyHostToDevice);
handle_errors(cudaStatus, "cudaMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy input points array from host memory to GPU buffer
cudaStatus = cudaMemcpy(dev_clusters, clusters, num_of_clusters * sizeof(Cluster), cudaMemcpyHostToDevice);
handle_errors(cudaStatus, "cudaMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy input points array from host memory to GPU buffer
cudaStatus = cudaMemcpy(dev_point_moved_flag, point_moved_flag, sizeof(int), cudaMemcpyHostToDevice);
handle_errors(cudaStatus, "cudaMemcpy host to device failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Launch a kernel on the GPU with blocks & threads
assign_points_to_clusters_gpu << <num_of_blocks, threads_per_block >> >(dev_points, num_of_points, dev_clusters, num_of_clusters, dev_point_moved_flag);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
handle_errors(cudaStatus, cudaGetErrorString(cudaStatus), 3, dev_points, dev_clusters, dev_point_moved_flag);
// cudaDeviceSynchronize waits for the kernel to finish
cudaStatus = cudaDeviceSynchronize();
handle_errors(cudaStatus, "cudaDeviceSynchronize returned error code after launching Kernel - assign_points_to_clusters!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(points, dev_points, num_of_points * sizeof(Point), cudaMemcpyDeviceToHost);
handle_errors(cudaStatus, "cudaMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(clusters, dev_clusters, num_of_clusters * sizeof(Cluster), cudaMemcpyDeviceToHost);
handle_errors(cudaStatus, "cudaMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(point_moved_flag, dev_point_moved_flag, sizeof(int), cudaMemcpyDeviceToHost);
handle_errors(cudaStatus, "cudaMemcpy device to host failed!\n", 3, dev_points, dev_clusters, dev_point_moved_flag);
}
void end_cuda(Point* dev_my_points, Cluster* dev_clusters, int* dev_point_moved_flag)
{
cudaError_t cudaStatus;
//free all cuda allocations
free_cuda_allocations(3, dev_my_points, dev_clusters, dev_point_moved_flag);
//reset device
cudaStatus = cudaDeviceReset();
handle_errors(cudaStatus, "cudaDeviceReset failed!", 0);
}
void handle_errors(cudaError_t cudaStatus, const char* error, int count, ...)
{
//The function receives the CUDA status and the device allocations made so far.
//If the status reports an error, the allocations are freed, the message is printed and the MPI job is aborted.
va_list allocs;
va_start(allocs, count);
int i;
if (cudaStatus != cudaSuccess)
{
//free cuda allocations
for (i = 0; i < count; i++)
cudaFree(va_arg(allocs, void*));
va_end(allocs);
//print the error and abort
printf("Cuda Error: %s", error);
fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, 0);
}
}
void free_cuda_allocations(int count, ...)
{
//The function receives the number of allocations followed by their pointers.
va_list allocs;
int i;
va_start(allocs, count);
//free each allocation
for (i = 0; i < count; i++)
{
cudaFree(va_arg(allocs, void*));
}
va_end(allocs);
}
|
47f7f35ec1b16fa382f9458f08731a4349eda28e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/allocator.h"
#include "fastertransformer/cuda/multi_head_attention.h"
#include "fastertransformer/cuda/open_attention.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cmath>
namespace fastertransformer{
namespace cuda{
/**
 * Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
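// The block-level reductions below use the usual two-level shuffle pattern: every warp reduces its
// 32 lanes with __shfl_xor_sync, lane 0 of each warp stores its partial result in shared memory,
// and the first warp then reduces those partials to the final value.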
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get max in each warp
  if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
template<typename T>
__global__
void add_QKV_bias(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = word_start_id; i < word_start_id + word_per_block; ++i)
{
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <>
__global__
void add_QKV_bias(__half* Q, const __half* bias_Q, __half* K, const __half* bias_K, __half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
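// Scaled, masked softmax over each attention row: masked positions (attr_mask == 0) get a -10000.0f
// penalty, the row maximum is subtracted before exponentiation for numerical stability, and a small
// 1e-6 term guards the normalization. softmax_kernel runs one block per (batch, head) and loops over
// the rows; softmax_kernel_v2 below runs one block per row and is chosen for small batch * head_num.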
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template<typename T>
__global__
void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template<>
__global__
void transpose(__half* src, __half* dst,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[target_id] = src_ptr[tid];
}
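// Unfused multi-head attention pipeline: add the Q/K/V biases while reshaping to
// [batch, head, seq, size_per_head], compute Q*K^T with a strided-batched GEMM, apply the scaled
// masked softmax, multiply by V with a second batched GEMM, and transpose the result back to
// [batch, seq, head * size_per_head].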
template<OperationType OpType_>
void OpenMultiHeadAttention<OpType_>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t cublas_handle,
DataType_* Q,
const DataType_* bias_Q,
DataType_* K,
const DataType_* bias_K,
DataType_* V,
const DataType_* bias_V,
const DataType_* attr_mask,
DataType_* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const DataType_ scaler)
{
int m = batch_size * seq_len;
int k = head_num * size_per_head;
dim3 grid;
dim3 block;
if(OpType_ == OperationType::FP32)
{
// const int word_per_block = 32;
const int word_per_block = 1;
    assert(k <= 1024); // k is used as the block size below, so it must stay within the 1024-thread limit
    assert(m / word_per_block * 3 <= 65536); // grid size sanity check
dim3 grid(m / word_per_block * 3);
dim3 block(k);
hipLaunchKernelGGL(( add_QKV_bias<DataType_>), dim3(grid), dim3(block), 0, stream, Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, word_per_block);
}
else
{
const int word_per_block = 1;
grid.x = batch_size * seq_len / word_per_block;
block.x = head_num * size_per_head * word_per_block / 2;
assert(block.x);
hipLaunchKernelGGL(( add_QKV_bias<DataType_>), dim3(grid), dim3(block), 0, stream, Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_,
v_buf_, batch_size, seq_len, head_num, size_per_head / 2, word_per_block);
}
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
seq_len, seq_len, size_per_head,
&alpha,
k_buf_, AType_, size_per_head, seq_len * size_per_head,
q_buf_, BType_, size_per_head, seq_len * size_per_head,
&beta,
qk_buf_, CType_, seq_len, seq_len * seq_len,
batch_size * head_num,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1])));
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
hipLaunchKernelGGL(( softmax_kernel_v2<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
else
{
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
size_per_head, seq_len, seq_len,
&alpha,
v_buf_, AType_, size_per_head, seq_len * size_per_head,
qk_buf_, BType_, seq_len, seq_len * seq_len,
&beta,
transpose_dst_, CType_, size_per_head, seq_len * size_per_head,
batch_size * head_num,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[2])));
/* for half2 only */
if(OpType_ == OperationType::HALF)
{
const int seq_per_block = 4;
// const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head / 2;
    assert(grid.x * seq_per_block == batch_size * head_num * seq_len); // seq_len must be divisible by seq_per_block
hipLaunchKernelGGL(( transpose<DataType_>), dim3(grid), dim3(block), 0, stream, transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head / 2);
}
else
{
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
hipLaunchKernelGGL(( transpose<DataType_>), dim3(grid), dim3(block), 0, stream, transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head);
}
}
template void OpenMultiHeadAttention<OperationType::FP32>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t handle,
float* Q,
const float* bias_Q,
float* K,
const float* bias_K,
float* V,
const float* bias_V,
const float* attr_mask,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float scaler);
template void OpenMultiHeadAttention<OperationType::HALF>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t handle,
__half* Q,
const __half* bias_Q,
__half* K,
const __half* bias_K,
__half* V,
const __half* bias_V,
const __half* attr_mask,
__half* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const __half scaler);
}//namespace cuda
}//namespace fastertransformer
| 47f7f35ec1b16fa382f9458f08731a4349eda28e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/allocator.h"
#include "fastertransformer/cuda/multi_head_attention.h"
#include "fastertransformer/cuda/open_attention.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cmath>
namespace fastertransformer{
namespace cuda{
/**
 * Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
  val = warpReduceMax(val); // get max in each warp
  if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
template<typename T>
__global__
void add_QKV_bias(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = word_start_id; i < word_start_id + word_per_block; ++i)
{
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <>
__global__
void add_QKV_bias(__half* Q, const __half* bias_Q, __half* K, const __half* bias_K, __half* V, const __half* bias_V,
__half* q_buf_, __half* k_buf_, __half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scaler)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scaler)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scaler + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
template<typename T>
__global__
void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template<>
__global__
void transpose(__half* src, __half* dst,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[target_id] = src_ptr[tid];
}
template<OperationType OpType_>
void OpenMultiHeadAttention<OpType_>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t cublas_handle,
DataType_* Q,
const DataType_* bias_Q,
DataType_* K,
const DataType_* bias_K,
DataType_* V,
const DataType_* bias_V,
const DataType_* attr_mask,
DataType_* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const DataType_ scaler)
{
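  /* Unfused multi-head attention pipeline:
   *   1) add_QKV_bias: add the biases and reshape Q/K/V from (batch*seq, head*size)
   *      into a per-head (batch, head, seq, size) layout in q_buf_/k_buf_/v_buf_,
   *   2) strided batched GEMM Q*K^T -> qk_buf_ (one problem per batch*head),
   *   3) masked softmax over qk_buf_,
   *   4) strided batched GEMM softmax(QK^T)*V -> transpose_dst_,
   *   5) transpose back to (batch, seq, head, size) into dst. */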
int m = batch_size * seq_len;
int k = head_num * size_per_head;
dim3 grid;
dim3 block;
if(OpType_ == OperationType::FP32)
{
// const int word_per_block = 32;
const int word_per_block = 1;
    assert(k <= 1024);
    assert(m / word_per_block * 3 <= 65536);
dim3 grid(m / word_per_block * 3);
dim3 block(k);
add_QKV_bias<DataType_><<<grid, block, 0, stream>>>(Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, word_per_block);
}
else
{
const int word_per_block = 1;
grid.x = batch_size * seq_len / word_per_block;
block.x = head_num * size_per_head * word_per_block / 2;
assert(block.x);
add_QKV_bias<DataType_><<<grid, block, 0, stream>>>(Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_,
v_buf_, batch_size, seq_len, head_num, size_per_head / 2, word_per_block);
}
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
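  /* Scores: for each of the batch_size * head_num problems compute the
   * seq_len x seq_len matrix Q*K^T; K is consumed transposed via CUBLAS_OP_T and
   * consecutive problems are addressed through the seq_len*size_per_head
   * (inputs) and seq_len*seq_len (output) strides. */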
check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
seq_len, seq_len, size_per_head,
&alpha,
k_buf_, AType_, size_per_head, seq_len * size_per_head,
q_buf_, BType_, size_per_head, seq_len * size_per_head,
&beta,
qk_buf_, CType_, seq_len, seq_len * seq_len,
batch_size * head_num,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[1])));
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
softmax_kernel_v2<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
else
{
grid.x = batch_size * head_num;
softmax_kernel<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scaler);
}
check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, seq_len, seq_len,
&alpha,
v_buf_, AType_, size_per_head, seq_len * size_per_head,
qk_buf_, BType_, seq_len, seq_len * seq_len,
&beta,
transpose_dst_, CType_, size_per_head, seq_len * size_per_head,
batch_size * head_num,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[2])));
/* for half2 only */
if(OpType_ == OperationType::HALF)
{
const int seq_per_block = 4;
// const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head / 2;
    assert(grid.x * seq_per_block == batch_size * head_num * seq_len);
transpose<DataType_><<<grid, block, 0, stream>>>(transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head / 2);
}
else
{
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
transpose<DataType_><<<grid, block, 0, stream>>>(transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head);
}
}
template void OpenMultiHeadAttention<OperationType::FP32>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t handle,
float* Q,
const float* bias_Q,
float* K,
const float* bias_K,
float* V,
const float* bias_V,
const float* attr_mask,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const float scaler);
template void OpenMultiHeadAttention<OperationType::HALF>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t handle,
__half* Q,
const __half* bias_Q,
__half* K,
const __half* bias_K,
__half* V,
const __half* bias_V,
const __half* attr_mask,
__half* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const __half scaler);
}//namespace cuda
}//namespace fastertransformer
|
3f681b01c01927d8021a7033af8c0eb406af695f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <locale>
#include <stdio.h>
#define BLOCK_SIZE 16
hipError_t MulMatrixCuda(float* mul_matrix, float* mul_matrix2, float* matrix1, float * matrix2, int n);
void print_matrix(float* mtx, int n);
__global__ void mtxMult(float *C, float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0.0;
	int ia = n * BLOCK_SIZE * by + n * ty;	// A[i,0] - index of the row (first element of the row)
	int ib = BLOCK_SIZE * bx + tx;			// B[0,j] - index of the column (first element of the column)
	for (int k = 0; k < n; k++)				// compute one output element
{
sum += A[ia + k] * B[ib + k * n];
}
	int ic = n * BLOCK_SIZE*by + BLOCK_SIZE * bx;	// start of the current sub-block in the result matrix
	C[ic + n * ty + tx] = sum;						// store the result
}
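// Tiled variant: each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C by
// sweeping matching tiles of A and B through shared memory, so each global
// element is read once per tile instead of once per output element.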
__global__ void mtxMult2(float *C, float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = n * BLOCK_SIZE * by;
int aEnd = aBegin + n - 1;
int bBegin = BLOCK_SIZE * bx;
int aStep = BLOCK_SIZE;
int bStep = BLOCK_SIZE * n;
float sum = 0.0f;
for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
{
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
as[ty][tx] = A[ia + n * ty + tx];
bs[ty][tx] = B[ib + n * ty + tx];
		__syncthreads(); // synchronize: the sub-matrices are fully loaded
for (int k = 0; k < BLOCK_SIZE; k++)
sum += as[ty][k] * bs[k][tx];
		__syncthreads(); // the sub-matrices are no longer needed
}
C[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}
int main()
{
setlocale(LC_ALL, "Russian");
const int k = 400;
	const int n = k * BLOCK_SIZE; // matrix dimension, a multiple of BLOCK_SIZE
/*float matrix1[n*n] = { 0 };
float matrix2[n*n] = { 0 };
float mul_matrix[n*n] = { 0 };
float mul_matrix2[n*n] = { 0 };*/
float* matrix1;
matrix1 = new float[n*n];
float * matrix2;
matrix2 = new float[n*n];
float * mul_matrix;
mul_matrix = new float[n*n];
float * mul_matrix2;
mul_matrix2 = new float[n*n];
	// initialize the matrices
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
matrix1[n * i + j] = i * 10 + j;
}
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
			matrix2[n *i + j] = i + 10 * j; // (i == j) ? 1 : 0; for an identity matrix
}
}
	// print the input matrices to the console
print_matrix(matrix1, n);
print_matrix(matrix2, n);
printf("\n\n");
hipError_t cudaStatus = MulMatrixCuda(mul_matrix, mul_matrix2, matrix1, matrix2, n);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
	// print the result matrices to the console
print_matrix(mul_matrix, n);
print_matrix(mul_matrix2, n);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t MulMatrixCuda(float* mul_matrix, float* mul_matrix2, float* matrix1, float * matrix2, int n)
{
int numBytes = n * n * sizeof(float);
float *dev_matrix1 = 0;
float *dev_matrix2 = 0;
float *dev_mul_matrix = 0;
float *dev_mul_matrix2 = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
#pragma region Allocate memory in DRAM
cudaStatus = hipMalloc((void**)&dev_mul_matrix, numBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_mul_matrix2, numBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_matrix1, numBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_matrix2, numBytes);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
#pragma endregion // for three matrices: the 1st, the 2nd and the result
#pragma region Copy data from CPU to DRAM
cudaStatus = hipMemcpy(dev_matrix1, matrix1, numBytes, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_matrix2, matrix2, numBytes, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
#pragma endregion // for the two input matrices
dim3 blocks(n / BLOCK_SIZE, n / BLOCK_SIZE);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
	// GPU timer
	hipEvent_t start, stop; // declare the events
	float elapsedTimeInMs = 0;
	hipEventCreate(&start); // initialize
	hipEventCreate(&stop); // initialize
	hipEventRecord(start, 0); // start the timer
	// Launch the kernel
mtxMult << <blocks, threads >> > (dev_mul_matrix, dev_matrix1, dev_matrix2, n);
	hipEventRecord(stop, 0); // stop the timer
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
	printf("Elapsed time of the 1st GPU method: %.8f ms\n\n", elapsedTimeInMs);
	hipEventCreate(&start); // initialize
	hipEventCreate(&stop); // initialize
	hipEventRecord(start, 0); // start the timer
mtxMult2 << <blocks, threads >> > (dev_mul_matrix2, dev_matrix1, dev_matrix2, n);
	hipEventRecord(stop, 0); // stop the timer
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTimeInMs, start, stop);
	printf("Elapsed time of the 2nd GPU method: %.8f ms\n\n", elapsedTimeInMs);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(mul_matrix, dev_mul_matrix, numBytes, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(mul_matrix2, dev_mul_matrix2, numBytes, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_mul_matrix);
hipFree(dev_mul_matrix2);
hipFree(dev_matrix1);
hipFree(dev_matrix2);
return cudaStatus;
}
void print_matrix(float* mtx, int n)
{
	return; // printing disabled; remove this early return to dump the matrices
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
float m = mtx[n * i + j];
printf("%6g|", m);
}
printf("\n");
}
printf("\n");
} | 3f681b01c01927d8021a7033af8c0eb406af695f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <locale>
#include <stdio.h>
#define BLOCK_SIZE 16
cudaError_t MulMatrixCuda(float* mul_matrix, float* mul_matrix2, float* matrix1, float * matrix2, int n);
void print_matrix(float* mtx, int n);
__global__ void mtxMult(float *C, float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0.0;
	int ia = n * BLOCK_SIZE * by + n * ty;	// A[i,0] - index of the row (first element of the row)
	int ib = BLOCK_SIZE * bx + tx;			// B[0,j] - index of the column (first element of the column)
	for (int k = 0; k < n; k++)				// compute one output element
{
sum += A[ia + k] * B[ib + k * n];
}
	int ic = n * BLOCK_SIZE*by + BLOCK_SIZE * bx;	// start of the current sub-block in the result matrix
	C[ic + n * ty + tx] = sum;						// store the result
}
__global__ void mtxMult2(float *C, float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = n * BLOCK_SIZE * by;
int aEnd = aBegin + n - 1;
int bBegin = BLOCK_SIZE * bx;
int aStep = BLOCK_SIZE;
int bStep = BLOCK_SIZE * n;
float sum = 0.0f;
for (int ia = aBegin, ib = bBegin; ia <= aEnd; ia += aStep, ib += bStep)
{
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
as[ty][tx] = A[ia + n * ty + tx];
bs[ty][tx] = B[ib + n * ty + tx];
		__syncthreads(); // synchronize: the sub-matrices are fully loaded
for (int k = 0; k < BLOCK_SIZE; k++)
sum += as[ty][k] * bs[k][tx];
		__syncthreads(); // the sub-matrices are no longer needed
}
C[n * BLOCK_SIZE * by + BLOCK_SIZE * bx + n * ty + tx] = sum;
}
int main()
{
setlocale(LC_ALL, "Russian");
const int k = 400;
	const int n = k * BLOCK_SIZE; // matrix dimension, a multiple of BLOCK_SIZE
/*float matrix1[n*n] = { 0 };
float matrix2[n*n] = { 0 };
float mul_matrix[n*n] = { 0 };
float mul_matrix2[n*n] = { 0 };*/
float* matrix1;
matrix1 = new float[n*n];
float * matrix2;
matrix2 = new float[n*n];
float * mul_matrix;
mul_matrix = new float[n*n];
float * mul_matrix2;
mul_matrix2 = new float[n*n];
	// initialize the matrices
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
matrix1[n * i + j] = i * 10 + j;
}
}
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
			matrix2[n *i + j] = i + 10 * j; // (i == j) ? 1 : 0; for an identity matrix
}
}
	// print the input matrices to the console
print_matrix(matrix1, n);
print_matrix(matrix2, n);
printf("\n\n");
cudaError_t cudaStatus = MulMatrixCuda(mul_matrix, mul_matrix2, matrix1, matrix2, n);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
	// print the result matrices to the console
print_matrix(mul_matrix, n);
print_matrix(mul_matrix2, n);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t MulMatrixCuda(float* mul_matrix, float* mul_matrix2, float* matrix1, float * matrix2, int n)
{
int numBytes = n * n * sizeof(float);
float *dev_matrix1 = 0;
float *dev_matrix2 = 0;
float *dev_mul_matrix = 0;
float *dev_mul_matrix2 = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
#pragma region Allocate memory in DRAM
cudaStatus = cudaMalloc((void**)&dev_mul_matrix, numBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_mul_matrix2, numBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_matrix1, numBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_matrix2, numBytes);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
#pragma endregion // for three matrices: the 1st, the 2nd and the result
#pragma region Copy data from CPU to DRAM
cudaStatus = cudaMemcpy(dev_matrix1, matrix1, numBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_matrix2, matrix2, numBytes, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
#pragma endregion // for the two input matrices
dim3 blocks(n / BLOCK_SIZE, n / BLOCK_SIZE);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
	// GPU timer
	cudaEvent_t start, stop; // declare the events
	float elapsedTimeInMs = 0;
	cudaEventCreate(&start); // initialize
	cudaEventCreate(&stop); // initialize
	cudaEventRecord(start, 0); // start the timer
	// Launch the kernel
mtxMult << <blocks, threads >> > (dev_mul_matrix, dev_matrix1, dev_matrix2, n);
	cudaEventRecord(stop, 0); // stop the timer
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
printf("Затраченное время 1-го метода GPU: %.8f мс\n\n", elapsedTimeInMs);
cudaEventCreate(&start); // инициализация
cudaEventCreate(&stop); // инициализация
cudaEventRecord(start, 0); // запуск таймера
mtxMult2 << <blocks, threads >> > (dev_mul_matrix2, dev_matrix1, dev_matrix2, n);
	cudaEventRecord(stop, 0); // stop the timer
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimeInMs, start, stop);
printf("Затраченное время 2-го метода GPU: %.8f мс\n\n", elapsedTimeInMs);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(mul_matrix, dev_mul_matrix, numBytes, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(mul_matrix2, dev_mul_matrix2, numBytes, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_mul_matrix);
cudaFree(dev_mul_matrix2);
cudaFree(dev_matrix1);
cudaFree(dev_matrix2);
return cudaStatus;
}
void print_matrix(float* mtx, int n)
{
	return; // printing disabled; remove this early return to dump the matrices
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
float m = mtx[n * i + j];
printf("%6g|", m);
}
printf("\n");
}
printf("\n");
} |
69d0425fb3c8615a1534c6947de1d67836e4c57d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include "common.h"
template <typename Dtype>
__device__ void maxpool_bubblesort(Dtype *a, Dtype *b, int n) {
int i, j;
Dtype temp;
for (i = 1; i < n; i++) {
for (j = 0; j < n- 1; j++) {
if (a[j] > a[j + 1]) {
temp = a[j];
a[j] = a[j + 1];
a[j + 1] = temp;
temp = b[j];
b[j] = b[j + 1];
b[j + 1] = temp;
}
}
}
}
// kernels borrowed from Caffe
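/* Stochastic max pooling over Gaussian activations: every input is treated as a
 * distribution N(mu, var) and the window maximum is folded in pairwise using the
 * usual moment-matching approximation (often attributed to Clark):
 *   theta = sqrt(var1 + var2), alpha = (mu1 - mu2) / theta,
 *   E[max] = mu1*Phi(alpha) + mu2*Phi(-alpha) + theta*phi(alpha),
 * with the second moment matched the same way. Sorting the window first (SORT)
 * only changes the order in which the pairwise approximation is applied. */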
template <typename Dtype, bool SORT>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* bottom_mu, const Dtype* bottom_var, Dtype* top_mu, Dtype* top_var,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
// assume 3x3 maxpool at most
Dtype mu_sorted[9];
Dtype var_sorted[9];
bottom_mu += (n * channels + c) * height * width;
bottom_var += (n * channels + c) * height * width;
int cnt = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
mu_sorted[cnt] = bottom_mu[h * width + w];
var_sorted[cnt] = bottom_var[h * width + w];
cnt++;
}
}
// ascending order
if (SORT)
maxpool_bubblesort(mu_sorted, var_sorted, cnt);
Dtype mu1 = mu_sorted[0];
Dtype var1 = fmaxf(var_sorted[0], 1e-20);
for(int k = 1; k < cnt; k++) {
Dtype mu2 = mu_sorted[k];
Dtype var2 = fmaxf(var_sorted[k], 1e-20);
Dtype theta = sqrtf(var1 + var2);
Dtype alpha12 = fdividef(mu1 - mu2, theta);
Dtype cdf12 = normcdff( alpha12); // 0.5*(1.+erf( alpha12/sqrt(2.)));
Dtype cdf21 = normcdff(-alpha12); // 0.5*(1.+erf(-alpha12/sqrt(2.)));
Dtype pdf12 = fdividef(expf(fdividef(-1.*alpha12*alpha12, 2.)), sqrtf(2.*M_PI));
Dtype t_mu = mu1*cdf12 + mu2*cdf21 + theta*pdf12;
Dtype t_var = fmaxf((var1+mu1*mu1)*cdf12 + (var2+mu2*mu2)*cdf21 + (mu1+mu2)*theta*pdf12 - t_mu*t_mu, 1e-20);
mu1 = t_mu;
var1 = t_var;
}
top_mu[index] = mu1;
top_var[index] = var1;
}
}
static int stcunn_StochasticSpatialMaxPooling_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input_mu = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *input_var = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int padW = luaT_getfieldcheckint(L, 1, "padW");
int padH = luaT_getfieldcheckint(L, 1, "padH");
bool sort = luaT_getfieldcheckboolean(L, 1, "sort");
bool ceil_mode = luaT_getfieldcheckboolean(L, 1, "ceil_mode");
THCudaTensor *output_mu = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "mu", "torch.CudaTensor");
THCudaTensor *output_var = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "var", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 4, input_mu, input_var, output_mu, output_var));
luaL_argcheck(L, input_mu->nDimension == 3 || input_mu->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input_mu->nDimension == 3) {
nInputCols = input_mu->size[2];
nInputRows = input_mu->size[1];
nInputPlane = input_mu->size[0];
batchSize = 1;
}
else
{
nInputCols = input_mu->size[3];
nInputRows = input_mu->size[2];
nInputPlane = input_mu->size[1];
batchSize = input_mu->size[0];
}
THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input_mu = THCudaTensor_newContiguous(state, input_mu);
input_var = THCudaTensor_newContiguous(state, input_var);
float* input_mu_data = THCudaTensor_data(state, input_mu);
float* input_var_data = THCudaTensor_data(state, input_var);
THCudaTensor_resize4d(state, output_mu, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize4d(state, output_var, batchSize, nInputPlane, nOutputRows, nOutputCols);
float* output_mu_data = THCudaTensor_data(state, output_mu);
float* output_var_data = THCudaTensor_data(state, output_var);
int count = THCudaTensor_nElement(state, output_mu);
if(sort)
hipLaunchKernelGGL(( MaxPoolForward<float, true>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_mu_data, input_var_data, output_mu_data, output_var_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW);
else
hipLaunchKernelGGL(( MaxPoolForward<float, false>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) ,
count, input_mu_data, input_var_data, output_mu_data, output_var_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW);
if(input_mu->nDimension == 3) {
THCudaTensor_resize3d(state, output_mu, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize3d(state, output_var, nInputPlane, nOutputRows, nOutputCols);
}
THCudaTensor_free(state, input_mu);
THCudaTensor_free(state, input_var);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in SpatialMaxPooling.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 2;
}
static const struct luaL_Reg stcunn_StochasticSpatialMaxPooling__ [] = {
{"StochasticSpatialMaxPooling_updateOutput", stcunn_StochasticSpatialMaxPooling_updateOutput},
{NULL, NULL}
};
static void stcunn_StochasticSpatialMaxPooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, stcunn_StochasticSpatialMaxPooling__, "nn");
lua_pop(L,1);
}
| 69d0425fb3c8615a1534c6947de1d67836e4c57d.cu | #include "utils.h"
#include "common.h"
template <typename Dtype>
__device__ void maxpool_bubblesort(Dtype *a, Dtype *b, int n) {
int i, j;
Dtype temp;
for (i = 1; i < n; i++) {
for (j = 0; j < n- 1; j++) {
if (a[j] > a[j + 1]) {
temp = a[j];
a[j] = a[j + 1];
a[j + 1] = temp;
temp = b[j];
b[j] = b[j + 1];
b[j + 1] = temp;
}
}
}
}
// kernels borrowed from Caffe
template <typename Dtype, bool SORT>
__global__ void MaxPoolForward(const int nthreads,
const Dtype* bottom_mu, const Dtype* bottom_var, Dtype* top_mu, Dtype* top_var,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
// assume 3x3 maxpool at most
Dtype mu_sorted[9];
Dtype var_sorted[9];
bottom_mu += (n * channels + c) * height * width;
bottom_var += (n * channels + c) * height * width;
int cnt = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
mu_sorted[cnt] = bottom_mu[h * width + w];
var_sorted[cnt] = bottom_var[h * width + w];
cnt++;
}
}
// ascending order
if (SORT)
maxpool_bubblesort(mu_sorted, var_sorted, cnt);
Dtype mu1 = mu_sorted[0];
Dtype var1 = fmaxf(var_sorted[0], 1e-20);
for(int k = 1; k < cnt; k++) {
Dtype mu2 = mu_sorted[k];
Dtype var2 = fmaxf(var_sorted[k], 1e-20);
Dtype theta = sqrtf(var1 + var2);
Dtype alpha12 = fdividef(mu1 - mu2, theta);
Dtype cdf12 = normcdff( alpha12); // 0.5*(1.+erf( alpha12/sqrt(2.)));
Dtype cdf21 = normcdff(-alpha12); // 0.5*(1.+erf(-alpha12/sqrt(2.)));
Dtype pdf12 = fdividef(expf(fdividef(-1.*alpha12*alpha12, 2.)), sqrtf(2.*M_PI));
Dtype t_mu = mu1*cdf12 + mu2*cdf21 + theta*pdf12;
Dtype t_var = fmaxf((var1+mu1*mu1)*cdf12 + (var2+mu2*mu2)*cdf21 + (mu1+mu2)*theta*pdf12 - t_mu*t_mu, 1e-20);
mu1 = t_mu;
var1 = t_var;
}
top_mu[index] = mu1;
top_var[index] = var1;
}
}
static int stcunn_StochasticSpatialMaxPooling_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input_mu = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *input_var = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int padW = luaT_getfieldcheckint(L, 1, "padW");
int padH = luaT_getfieldcheckint(L, 1, "padH");
bool sort = luaT_getfieldcheckboolean(L, 1, "sort");
bool ceil_mode = luaT_getfieldcheckboolean(L, 1, "ceil_mode");
THCudaTensor *output_mu = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "mu", "torch.CudaTensor");
THCudaTensor *output_var = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "var", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 4, input_mu, input_var, output_mu, output_var));
luaL_argcheck(L, input_mu->nDimension == 3 || input_mu->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
long nInputCols, nInputRows, nInputPlane, batchSize;
long nOutputCols, nOutputRows;
if (input_mu->nDimension == 3) {
nInputCols = input_mu->size[2];
nInputRows = input_mu->size[1];
nInputPlane = input_mu->size[0];
batchSize = 1;
}
else
{
nInputCols = input_mu->size[3];
nInputRows = input_mu->size[2];
nInputPlane = input_mu->size[1];
batchSize = input_mu->size[0];
}
THArgCheck(nInputCols >= kW - padW && nInputRows >= kH - padH, 2, "input image smaller than kernel size");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size");
if(ceil_mode) {
nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
else {
nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1;
nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1;
}
if (padW || padH)
{
// ensure that the last pooling starts inside the image
if ((nOutputRows - 1)*dH >= nInputRows + padH)
--nOutputRows;
if ((nOutputCols - 1)*dW >= nInputCols + padW)
--nOutputCols;
}
input_mu = THCudaTensor_newContiguous(state, input_mu);
input_var = THCudaTensor_newContiguous(state, input_var);
float* input_mu_data = THCudaTensor_data(state, input_mu);
float* input_var_data = THCudaTensor_data(state, input_var);
THCudaTensor_resize4d(state, output_mu, batchSize, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize4d(state, output_var, batchSize, nInputPlane, nOutputRows, nOutputCols);
float* output_mu_data = THCudaTensor_data(state, output_mu);
float* output_var_data = THCudaTensor_data(state, output_var);
int count = THCudaTensor_nElement(state, output_mu);
if(sort)
MaxPoolForward<float, true>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count, input_mu_data, input_var_data, output_mu_data, output_var_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW);
else
MaxPoolForward<float, false>
<<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>
(count, input_mu_data, input_var_data, output_mu_data, output_var_data,
batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols,
kH, kW, dH, dW, padH, padW);
if(input_mu->nDimension == 3) {
THCudaTensor_resize3d(state, output_mu, nInputPlane, nOutputRows, nOutputCols);
THCudaTensor_resize3d(state, output_var, nInputPlane, nOutputRows, nOutputCols);
}
THCudaTensor_free(state, input_mu);
THCudaTensor_free(state, input_var);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in SpatialMaxPooling.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 2;
}
static const struct luaL_Reg stcunn_StochasticSpatialMaxPooling__ [] = {
{"StochasticSpatialMaxPooling_updateOutput", stcunn_StochasticSpatialMaxPooling_updateOutput},
{NULL, NULL}
};
static void stcunn_StochasticSpatialMaxPooling_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, stcunn_StochasticSpatialMaxPooling__, "nn");
lua_pop(L,1);
}
|
6b2a264bdbc2c05d6433486154c09912840f2e5b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add_reference_points_norm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *array = NULL;
hipMalloc(&array, XSIZE*YSIZE);
int width = XSIZE;
int pitch = 2;
int height = YSIZE;
float *norm = NULL;
hipMalloc(&norm, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
		hipFree(0);
		hipLaunchKernelGGL(add_reference_points_norm, dim3(gridBlock), dim3(threadBlock), 0, 0, array, width, pitch, height, norm);
		hipDeviceSynchronize();
		for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
			hipLaunchKernelGGL(add_reference_points_norm, dim3(gridBlock), dim3(threadBlock), 0, 0, array, width, pitch, height, norm);
		}
		auto start = steady_clock::now();
		for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
			hipLaunchKernelGGL(add_reference_points_norm, dim3(gridBlock), dim3(threadBlock), 0, 0, array, width, pitch, height, norm);
		}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6b2a264bdbc2c05d6433486154c09912840f2e5b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add_reference_points_norm.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *array = NULL;
cudaMalloc(&array, XSIZE*YSIZE);
int width = XSIZE;
int pitch = 2;
int height = YSIZE;
float *norm = NULL;
cudaMalloc(&norm, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
add_reference_points_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
add_reference_points_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
add_reference_points_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b6c37bd089b133bd06eba4ee540633c7c81ab346.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <climits>
#include <cstdint>
#include <cstring>
#include <array>
#include <chrono>
#include <iostream>
#include <random>
#include <set>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#include "helper_cuda.h"
#include <sys/time.h>
#include <hip/hip_cooperative_groups.h>
#include <unistd.h>
using namespace std;
using namespace __gnu_pbds;
#define MAX_WORKSIZE 128000
typedef
tree<
uint64_t,
null_type,
less<uint64_t>,
rb_tree_tag,
tree_order_statistics_node_update>
ordered_set_t;
namespace org {
namespace quarkchain {
__constant__ const uint32_t FNV_PRIME_32 = 0x01000193;
__constant__ const uint64_t FNV_PRIME_64 = 0x100000001b3ULL;
__constant__ const uint32_t ACCESS_ROUND = 64;
__constant__ const uint32_t INIT_SET_ENTRIES = 1024 * 64;
/* Keccak core function */
#define KECCAK_ROUNDS 24
#define ROT_01 36
#define ROT_02 3
#define ROT_03 41
#define ROT_04 18
#define ROT_05 1
#define ROT_06 44
#define ROT_07 10
#define ROT_08 45
#define ROT_09 2
#define ROT_10 62
#define ROT_11 6
#define ROT_12 43
#define ROT_13 15
#define ROT_14 61
#define ROT_15 28
#define ROT_16 55
#define ROT_17 25
#define ROT_18 21
#define ROT_19 56
#define ROT_20 27
#define ROT_21 20
#define ROT_22 39
#define ROT_23 8
#define ROT_24 14
__device__ __constant__ const uint64_t roundconstants[KECCAK_ROUNDS] = {
0x0000000000000001ULL,
0x0000000000008082ULL,
0x800000000000808aULL,
0x8000000080008000ULL,
0x000000000000808bULL,
0x0000000080000001ULL,
0x8000000080008081ULL,
0x8000000000008009ULL,
0x000000000000008aULL,
0x0000000000000088ULL,
0x0000000080008009ULL,
0x000000008000000aULL,
0x000000008000808bULL,
0x800000000000008bULL,
0x8000000000008089ULL,
0x8000000000008003ULL,
0x8000000000008002ULL,
0x8000000000000080ULL,
0x000000000000800aULL,
0x800000008000000aULL,
0x8000000080008081ULL,
0x8000000000008080ULL,
0x0000000080000001ULL,
0x8000000080008008ULL
};
#define ROL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
__device__ void keccak_function (uint64_t *state) {
short i;
/* Temporary variables to avoid indexing overhead */
uint64_t a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12;
uint64_t a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24;
uint64_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12;
uint64_t b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24;
uint64_t c0, c1, c2, c3, c4, d;
a0 = state[0];
a1 = state[1];
a2 = state[2];
a3 = state[3];
a4 = state[4];
a5 = state[5];
a6 = state[6];
a7 = state[7];
a8 = state[8];
a9 = state[9];
a10 = state[10];
a11 = state[11];
a12 = state[12];
a13 = state[13];
a14 = state[14];
a15 = state[15];
a16 = state[16];
a17 = state[17];
a18 = state[18];
a19 = state[19];
a20 = state[20];
a21 = state[21];
a22 = state[22];
a23 = state[23];
a24 = state[24];
for (i = 0; i < KECCAK_ROUNDS; ++i) {
/*
* Uses temporary variables and loop unrolling to
* avoid array indexing and inner loops overhead
* */
/* Prepare column parity for Theta step */
c0 = a0 ^ a5 ^ a10 ^ a15 ^ a20;
c1 = a1 ^ a6 ^ a11 ^ a16 ^ a21;
c2 = a2 ^ a7 ^ a12 ^ a17 ^ a22;
c3 = a3 ^ a8 ^ a13 ^ a18 ^ a23;
c4 = a4 ^ a9 ^ a14 ^ a19 ^ a24;
/* Theta + Rho + Pi steps */
d = c4 ^ ROL64(c1, 1);
b0 = d ^ a0;
b16 = ROL64(d ^ a5, ROT_01);
b7 = ROL64(d ^ a10, ROT_02);
b23 = ROL64(d ^ a15, ROT_03);
b14 = ROL64(d ^ a20, ROT_04);
d = c0 ^ ROL64(c2, 1);
b10 = ROL64(d ^ a1, ROT_05);
b1 = ROL64(d ^ a6, ROT_06);
b17 = ROL64(d ^ a11, ROT_07);
b8 = ROL64(d ^ a16, ROT_08);
b24 = ROL64(d ^ a21, ROT_09);
d = c1 ^ ROL64(c3, 1);
b20 = ROL64(d ^ a2, ROT_10);
b11 = ROL64(d ^ a7, ROT_11);
b2 = ROL64(d ^ a12, ROT_12);
b18 = ROL64(d ^ a17, ROT_13);
b9 = ROL64(d ^ a22, ROT_14);
d = c2 ^ ROL64(c4, 1);
b5 = ROL64(d ^ a3, ROT_15);
b21 = ROL64(d ^ a8, ROT_16);
b12 = ROL64(d ^ a13, ROT_17);
b3 = ROL64(d ^ a18, ROT_18);
b19 = ROL64(d ^ a23, ROT_19);
d = c3 ^ ROL64(c0, 1);
b15 = ROL64(d ^ a4, ROT_20);
b6 = ROL64(d ^ a9, ROT_21);
b22 = ROL64(d ^ a14, ROT_22);
b13 = ROL64(d ^ a19, ROT_23);
b4 = ROL64(d ^ a24, ROT_24);
/* Chi + Iota steps */
a0 = b0 ^ (~b1 & b2) ^ roundconstants[i];
a1 = b1 ^ (~b2 & b3);
a2 = b2 ^ (~b3 & b4);
a3 = b3 ^ (~b4 & b0);
a4 = b4 ^ (~b0 & b1);
a5 = b5 ^ (~b6 & b7);
a6 = b6 ^ (~b7 & b8);
a7 = b7 ^ (~b8 & b9);
a8 = b8 ^ (~b9 & b5);
a9 = b9 ^ (~b5 & b6);
a10 = b10 ^ (~b11 & b12);
a11 = b11 ^ (~b12 & b13);
a12 = b12 ^ (~b13 & b14);
a13 = b13 ^ (~b14 & b10);
a14 = b14 ^ (~b10 & b11);
a15 = b15 ^ (~b16 & b17);
a16 = b16 ^ (~b17 & b18);
a17 = b17 ^ (~b18 & b19);
a18 = b18 ^ (~b19 & b15);
a19 = b19 ^ (~b15 & b16);
a20 = b20 ^ (~b21 & b22);
a21 = b21 ^ (~b22 & b23);
a22 = b22 ^ (~b23 & b24);
a23 = b23 ^ (~b24 & b20);
a24 = b24 ^ (~b20 & b21);
}
state[0] = a0;
state[1] = a1;
state[2] = a2;
state[3] = a3;
state[4] = a4;
state[5] = a5;
state[6] = a6;
state[7] = a7;
state[8] = a8;
state[9] = a9;
state[10] = a10;
state[11] = a11;
state[12] = a12;
state[13] = a13;
state[14] = a14;
state[15] = a15;
state[16] = a16;
state[17] = a17;
state[18] = a18;
state[19] = a19;
state[20] = a20;
state[21] = a21;
state[22] = a22;
state[23] = a23;
state[24] = a24;
}
/*
* 32-bit FNV function
*/
__device__ __host__ uint32_t fnv32(uint32_t v1, uint32_t v2) {
return (v1 * FNV_PRIME_32) ^ v2;
}
/*
* 64-bit FNV function
*/
__device__ __host__ uint64_t fnv64(uint64_t v1, uint64_t v2) {
return (v1 * FNV_PRIME_64) ^ v2;
}
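/*
 * Both helpers follow the FNV-1-style "multiply then xor" update, e.g.
 * fnv64(v1, v2) == (v1 * 0x100000001b3) ^ v2 in modular 64-bit arithmetic.
 * They are used below both to mix data words and to derive pseudo-random
 * ranks into the ordered set.
 */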
#define LEFT(x) (items+x->left_offset)
#define RIGHT(x) (items+x->right_offset)
#define HAS_LEFT(x) (x->size_left > 0)
#define HAS_RIGHT(x) (x->size_right > 0)
/*
#define LEFT(x) (x->left)
#define RIGHT(x) (x->right)
#define HAS_LEFT(x) (x->left)
#define HAS_RIGHT(x) (x->right)
*/
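/*
 * cuoset: an order-statistics AVL tree kept in a flat per-thread array of
 * INIT_SET_ENTRIES nodes. Children are addressed through 16-bit offsets into
 * that array (the LEFT/RIGHT macros) instead of pointers, so a whole tree can
 * be memcpy'd between host and device and duplicated per work-item.
 * size_left/size_right cache the subtree sizes used for rank queries.
 */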
typedef struct cuoset {
uint64_t value;
/*struct cuoset *left;
struct cuoset *right;*/
uint16_t left_offset;
uint16_t right_offset;
uint8_t in_use;
uint8_t height;
uint16_t size_left;
uint16_t size_right;
} cuoset;
__device__ __host__ uint32_t cuoset_size(cuoset *s);
__device__ __host__ __forceinline__ uint64_t max(uint64_t a, uint64_t b) {
return (a>b) ? a : b;
}
__device__ __host__ __forceinline__ uint8_t height(cuoset *p) {
//if (p == NULL || p->in_use == 0) return 0;
return p->height;
}
__device__ __host__ cuoset *rightRotate(cuoset *y, cuoset *items) {
cuoset *x = LEFT(y);
cuoset *T2 = RIGHT(x);
/*RIGHT(x) = y;
LEFT(y) = T2;*/
x->right_offset = y-items;
y->left_offset = T2-items;
y->size_left = x->size_right;
x->size_right = y->size_left + y->size_right + 1;
uint8_t yhl = HAS_LEFT(y) ? height(LEFT(y)) : 0;
uint8_t yhr = HAS_RIGHT(y) ? height(RIGHT(y)) : 0;
y->height = max(yhl, yhr)+1;
uint8_t xhl = HAS_LEFT(x) ? height(LEFT(x)) : 0;
uint8_t xhr = HAS_RIGHT(x) ? height(RIGHT(x)) : 0;
x->height = max(xhl, xhr)+1;
return x;
}
__device__ __host__ cuoset *leftRotate(cuoset *x, cuoset *items) {
cuoset *y = RIGHT(x);
cuoset *T2 = LEFT(y);
/*LEFT(y) = x;
RIGHT(x) = T2;*/
y->left_offset = x-items;
x->right_offset = T2-items;
x->size_right = y->size_left;
y->size_left = x->size_left + x->size_right + 1;
uint8_t xhl = HAS_LEFT(x) ? height(LEFT(x)) : 0;
uint8_t xhr = HAS_RIGHT(x) ? height(RIGHT(x)) : 0;
x->height = max(xhl, xhr)+1;
uint8_t yhl = HAS_LEFT(y) ? height(LEFT(y)) : 0;
uint8_t yhr = HAS_RIGHT(y) ? height(RIGHT(y)) : 0;
y->height = max(yhl, yhr)+1;
return y;
}
__device__ __host__ __forceinline__ int8_t getBalance(cuoset *N, cuoset *items) {
if (N == NULL || N->in_use == 0) return 0;
uint8_t hl = HAS_LEFT(N) ? height(LEFT(N)) : 0;
uint8_t hr = HAS_RIGHT(N) ? height(RIGHT(N)) : 0;
return hl-hr;
}
cuoset *h_unused_item;
uint32_t h_oset_size;
__device__ cuoset *unused_item[MAX_WORKSIZE] = {};
__device__ uint32_t oset_size[MAX_WORKSIZE] = {};
__device__ uint32_t depth = 0;
__device__ __host__ cuoset *cuoset_insert(cuoset *node, uint64_t value, uint32_t gid, cuoset *items) {
if (node == NULL || node->in_use == 0) {
#ifdef __CUDA_ARCH__
cuoset *new_node = unused_item[gid];
#else
cuoset *new_node = h_unused_item;
#endif
new_node->value = value;
/*LEFT(new_node) = NULL;
RIGHT(new_node) = NULL;*/
new_node->in_use = 1;
new_node->height = 1;
new_node->size_left = 0;
new_node->size_right = 0;
#ifdef __CUDA_ARCH__
unused_item[gid] = NULL;
oset_size[gid]++;
#else
h_unused_item = NULL;
h_oset_size++;
#endif
return new_node;
}
if (value < node->value) {
//LEFT(node) = cuoset_insert(LEFT(node), value, gid, items);
if (HAS_LEFT(node)) {
node->left_offset = cuoset_insert(LEFT(node), value, gid, items) - items;
} else {
node->left_offset = cuoset_insert(NULL, value, gid, items) - items;
}
node->size_left++;
} else if (value > node->value) {
//RIGHT(node) = cuoset_insert(RIGHT(node), value, gid, items);
if (HAS_RIGHT(node)) {
node->right_offset = cuoset_insert(RIGHT(node), value, gid, items) - items;
} else {
node->right_offset = cuoset_insert(NULL, value, gid, items) - items;
}
node->size_right++;
} else {
// Keys equal, discard insert since values need to be unique
return node;
}
uint8_t hl = 0;
if (HAS_LEFT(node)) hl = height(LEFT(node));
uint8_t hr = 0;
if (HAS_RIGHT(node)) hr = height(RIGHT(node));
node->height = 1 + max(hl, hr);
int8_t balance = getBalance(node, items);
if (balance > 1) {
uint64_t lval = LEFT(node)->value;
// Left Left case
if (value < lval) {
return rightRotate(node, items);
}
// Left Right case
if (value > lval) {
//LEFT(node) = leftRotate(LEFT(node), items);
node->left_offset = leftRotate(LEFT(node), items) - items;
return rightRotate(node, items);
}
}
if (balance < -1) {
uint64_t rval = RIGHT(node)->value;
// Right Right case
if (value > rval) {
return leftRotate(node, items);
}
// Right Left case
if (value < rval) {
//RIGHT(node) = rightRotate(RIGHT(node), items);
node->right_offset = rightRotate(RIGHT(node), items) - items;
return leftRotate(node, items);
}
}
return node;
}
__device__ __host__ cuoset *minValueNode(cuoset *node, cuoset *items) {
cuoset *current = node;
while (HAS_LEFT(current)) {
current = LEFT(current);
}
return current;
}
__device__ __host__ cuoset *cuoset_erase(cuoset *root, cuoset *item, uint32_t gid, cuoset *items) {
if (root == NULL ||root->in_use == 0 ) return root;
if (item->value < root->value) {
//LEFT(root) = cuoset_erase(LEFT(root), item, gid, items);
root->left_offset = cuoset_erase(LEFT(root), item, gid, items) - items;
root->size_left--;
} else if (item->value > root->value) {
//RIGHT(root) = cuoset_erase(RIGHT(root), item, gid, items);
root->right_offset = cuoset_erase(RIGHT(root), item, gid, items) - items;
root->size_right--;
} else {
if ( (!HAS_LEFT(root)) || (!HAS_RIGHT(root)) ) {
cuoset *temp = HAS_LEFT(root) ? LEFT(root) : RIGHT(root);
//if (temp == NULL || temp->in_use == 0) {
if (!HAS_LEFT(root) && !HAS_RIGHT(root)) {
temp = root;
root = NULL;
} else {
*root = *temp;
}
temp->in_use = 0;
temp->left_offset = 0;
temp->right_offset = 0;
temp->size_left = 0;
temp->size_right = 0;
#ifdef __CUDA_ARCH__
unused_item[gid] = temp;
oset_size[gid]--;
#else
h_unused_item = temp;
h_oset_size--;
#endif
} else {
cuoset *temp = minValueNode(RIGHT(root), items);
root->value = temp->value;
//RIGHT(root) = cuoset_erase(RIGHT(root), temp, gid, items);
root->right_offset = cuoset_erase(RIGHT(root), temp, gid, items) - items;
root->size_right -= 1;
}
}
if (root == NULL) return root;
//root->height = 1 + max(height(LEFT(root)), height(RIGHT(root)));
uint8_t hl = 0;
if (HAS_LEFT(root)) hl = height(LEFT(root));
uint8_t hr = 0;
if (HAS_RIGHT(root)) hr = height(RIGHT(root));
root->height = 1 + max(hl, hr);
int8_t balance = getBalance(root, items);
if (balance > 1) {
int8_t bl = getBalance(LEFT(root), items);
// Left Left case
if (bl >= 0) {
return rightRotate(root, items);
}
// Left Right case
if (bl < 0) {
//LEFT(root) = leftRotate(LEFT(root), items);
root->left_offset = leftRotate(LEFT(root), items) - items;
return rightRotate(root, items);
}
}
if (balance < -1) {
int8_t br = getBalance(RIGHT(root), items);
// Right Right case
if (br <= 0) {
return leftRotate(root, items);
}
// Right Left case
if (br > 0) {
//RIGHT(root) = rightRotate(RIGHT(root), items);
root->right_offset = rightRotate(RIGHT(root), items) - items;
return leftRotate(root, items);
}
}
return root;
}
/*
__device__ __host__ uint32_t cuoset_size(cuoset *s) {
if (s == NULL) return 0;
uint32_t size = 1+cuoset_size(LEFT(s))+cuoset_size(RIGHT(s));
return size;
}
*/
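/* Select the p-th smallest element (0-based) without a full traversal: descend
 * the tree comparing p against the cached left-subtree size, the standard
 * order-statistic walk in O(tree height). */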
__device__ cuoset* cuoset_get(cuoset* s, uint32_t p, cuoset *items) {
while (s != NULL && s->in_use) {
uint32_t size_l = s->size_left;
if (p == size_l) {
return s;
} else if (p < size_l) {
s = LEFT(s);
} else {
s = RIGHT(s);
p = p-(size_l+1);
}
}
return NULL;
}
/*
 * A simplified version of generating the initial set.
* A more secure way is to use the cache generation in eth.
*/
void generate_init_set(ordered_set_t& oset, uint64_t seed, uint32_t size) {
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(seed);
for (uint32_t i = 0; i < size; i++) {
uint64_t v = dist(generator);
oset.insert(v);
}
}
__device__ cuoset *d_coset[MAX_WORKSIZE];
__host__ cuoset *h_qkc_hash_init(uint64_t *oset_raw, cuoset *items) {
cuoset *coset = NULL;
cuoset **unused = &(h_unused_item);
h_oset_size = 0;
for (int i = 0; i < INIT_SET_ENTRIES; i++) {
*unused = &(items[i]);
(*unused)->in_use = 0;
coset = cuoset_insert(coset, oset_raw[i], 0, items);
}
//printf("Initialized on host. coset offset: %lu\n", coset-items);
return coset;
}
__global__ void qkc_hash_init(uint64_t *oset_raw, cuoset *items_all) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *coset = d_coset[gid];
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
if (gid == 0) {
cuoset **unused = &(unused_item[gid]);
oset_size[gid] = 0;
for (int i = 0; i < INIT_SET_ENTRIES; i++) {
*unused = &(items[i]);
(*unused)->in_use = 0;
d_coset[0] = cuoset_insert(d_coset[0], oset_raw[i], gid, items);
}
}
}
__global__ void qkc_hash_init_copy(cuoset *items_all, uint16_t offset, uint32_t o_size) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
if (gid > 0) {
for (int i = 0; i < INIT_SET_ENTRIES*sizeof(cuoset)/sizeof(uint64_t); i++) {
uint64_t tmp = ((uint64_t*)(items_all))[i];
((uint64_t*)(items))[i] = tmp;
}
}
//uint16_t offset = d_coset[0] - items_all;
d_coset[gid] = items + offset;
//oset_size[gid] = oset_size[0];
oset_size[gid] = o_size;
unused_item[gid] = NULL;
}
#define swap64(x) \
((uint64_t)((((uint64_t)(x)) >> 56) | \
(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \
(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \
(((uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \
(((uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \
(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \
(((uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \
(((uint64_t)(x)) << 56)))
/*
* QKC hash using ordered set.
*/
//#define DEBUG
#define MIX_SIZE 16
#define SEED_SIZE 8
#define RESULT_SIZE 4
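/*
 * One work-item per nonce candidate: keccak-512 the 32-byte header plus the
 * 8-byte nonce (start_nonce + gid) into the seed, run ACCESS_ROUND rounds of
 * FNV-driven lookups and replacements against this thread's private copy of
 * the ordered set, compress the 16-word mix to 4 words, keccak-256 the
 * (seed, result) pair and record a nonce whose hash meets the target in the
 * mapped `found` buffer.
 */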
__global__ void qkc_hash(
uint64_t *result_out,
cuoset *items_all,
uint64_t *found,
uint64_t *target,
uint8_t *header,
uint64_t start_nonce,
uint8_t devid,
uint32_t worksize) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *coset = d_coset[gid];
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
#ifdef DEBUG
if (gid == 0) {
printf("Target %16lx %16lx %16lx %16lx\n", swap64(target[0]), swap64(target[1]), swap64(target[2]), swap64(target[3]));
}
#endif
/* Calculate SHA3_512 */
uint64_t seed_hash[SEED_SIZE];
{
const int rsize = 72;
const int rsize_byte = 9;
uint64_t state[25];
uint8_t temp[144];
const uint32_t msglen = 40;
memset(state, 0, sizeof(state));
for (int i = 0; i < 4; i++) {
state[i] = (((uint64_t*)header)[i]);
}
state[4] = (start_nonce) + gid;
#ifdef DEBUG
if (gid == 10) {
printf("CUDA sha3_512( ");
for (int i = 0; i < 40; i++) {
printf("%02x ", ((uint8_t*)state)[i]);
}
printf(" )\n");
}
#endif
// Padding
memcpy(temp, state, msglen);
memset(temp+msglen, 0, rsize-msglen);
//temp[msglen] = 0x06;
temp[msglen] = 0x01;
temp[rsize - 1] |= 0x80;
/* Absorb */
for (int i = 0; i < rsize_byte; i++) {
state[i] = ((uint64_t*)temp)[i];
}
#ifdef DEBUG
if (gid == 10) {
printf("CUDA sha3_512 state1: ");
for (int i = 0; i < rsize; i++) {
printf("%02x ", ((uint8_t*)state)[i]);
}
printf("\n");
}
#endif
keccak_function(state);
/* Squeeze */
seed_hash[0] = state[0];
seed_hash[1] = state[1];
seed_hash[2] = state[2];
seed_hash[3] = state[3];
seed_hash[4] = state[4];
seed_hash[5] = state[5];
seed_hash[6] = state[6];
seed_hash[7] = state[7];
#ifdef DEBUG
if (gid == 10) {
printf("CPU Seed Hash: ");
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed_init)[i]);
}
printf("\n");
printf("CUDA Seed Hash: ");
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed_hash)[i]);
}
printf("\n");
}
#endif
}
uint64_t seed[SEED_SIZE];
for (int i = 0; i < SEED_SIZE; i++) {
//seed[i] = seed_init[i];
seed[i] = seed_hash[i];
}
uint64_t mix[16];
for (uint32_t i = 0; i < MIX_SIZE; i++) {
mix[i] = seed[i % SEED_SIZE];
}
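    /* Each access round removes 16 rank-selected values from the ordered set,
     * re-inserts 16 new FNV-derived values (duplicates are discarded, so the set
     * size stays roughly constant) and folds the removed values into the running
     * mix; the rank used for the next lookup depends on the value just read,
     * which makes the access pattern data-dependent. */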
for (uint32_t i = 0; i < ACCESS_ROUND; i ++) {
uint64_t new_data[16];
uint64_t p = fnv64(i ^ seed[0], mix[i % MIX_SIZE]);
for (uint32_t j = 0; j < MIX_SIZE; j++) {
// Find the pth element and remove it
cuoset *it = cuoset_get(coset, p % oset_size[gid], items);
new_data[j] = it->value;
coset = cuoset_erase(coset, it, gid, items);
// Generate random data and insert it
p = fnv64(p, new_data[j]);
coset = cuoset_insert(coset, p, gid, items);
// Find the next element index (ordered)
p = fnv64(p, new_data[j]);
}
for (uint32_t j = 0; j < MIX_SIZE; j++) {
mix[j] = fnv64(mix[j], new_data[j]);
}
}
/*
* Compress
*/
uint64_t result[RESULT_SIZE];
for (uint32_t i = 0; i < RESULT_SIZE; i++) {
uint32_t j = i * 4;
result[i] = fnv64(fnv64(fnv64(mix[j], mix[j + 1]), mix[j + 2]), mix[j + 3]);
}
/* Calculate SHA3_256 */
uint64_t hash[4];
{
const int rsize = 136;
const int rsize_byte = 17;
uint64_t state[25];
uint8_t temp[144];
const uint32_t msglen = 96;
memset(state, 0, sizeof(state));
for (int i = 0; i < 8; i++) {
state[i] = seed[i];
}
state[ 8] = result[0];
state[ 9] = result[1];
state[10] = result[2];
state[11] = result[3];
// Padding
memcpy(temp, state, msglen);
memset(temp+msglen, 0, rsize-msglen);
temp[msglen] = 0x01;
temp[rsize - 1] |= 0x80;
/* Absorb */
for (int i = 0; i < rsize_byte; i++) {
state[i] = ((uint64_t*)temp)[i];
}
keccak_function(state);
/* Squeeze */
hash[0] = state[0];
hash[1] = state[1];
hash[2] = state[2];
hash[3] = state[3];
}
if (swap64(hash[0]) <= swap64(target[0])) {
#ifdef DEBUG
printf("%16lx < %16lx\n", swap64(hash[0]), swap64(target[0]));
printf("CUDA Solve (devid: %d, gid: %d)! ", devid, gid);
for (int i = 0; i < 32; i++) {
printf("%02x, ", ((uint8_t*)hash)[i]);
}
printf("\n");
printf("CUDA Solve (devid: %d, gid: %d) Result: ", devid, gid);
for (int i = 0; i < 32; i++) {
printf("%02x, ", ((uint8_t*)result)[i]);
}
printf("\n");
printf("CUDA Solve Seed Hash (devid: %d, gid: %d): ", devid, gid);
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed)[i]);
}
printf("\n");
#endif
if (!found[4]) {
found[0] = result[0];
found[1] = result[1];
found[2] = result[2];
found[3] = result[3];
found[5] = gid + devid*worksize;
found[4] = 1;
}
}
//memcpy(result_out+gid*RESULT_SIZE, result, RESULT_SIZE*sizeof(uint64_t));
}
void qkc_hash_sorted_list(
std::vector<uint64_t>& slist,
std::array<uint64_t, 8>& seed,
std::array<uint64_t, 4>& result) {
std::array<uint64_t, 16> mix;
for (uint32_t i = 0; i < mix.size(); i++) {
mix[i] = seed[i % seed.size()];
}
for (uint32_t i = 0; i < ACCESS_ROUND; i ++) {
std::array<uint64_t, 16> new_data;
uint64_t p = fnv64(i ^ seed[0], mix[i % mix.size()]);
for (uint32_t j = 0; j < mix.size(); j++) {
// Find the pth element and remove it
uint32_t idx = p % slist.size();
new_data[j] = slist[idx];
slist.erase(slist.begin() + idx);
// Generate random data and insert it
// if the vector doesn't contain it.
p = fnv64(p, new_data[j]);
auto it = std::lower_bound(slist.begin(), slist.end(), p);
if (it == slist.end() || *it != p) {
slist.insert(it, p);
}
#ifdef DEBUG
if (i == 60) {
printf("access %d, mix %d: get value=%lu, insert value=%lu\n", i, j, new_data[j], p);
}
#endif
// Find the next element index (ordered)
p = fnv64(p, new_data[j]);
}
for (uint32_t j = 0; j < mix.size(); j++) {
mix[j] = fnv64(mix[j], new_data[j]);
}
}
/*
* Compress
*/
for (uint32_t i = 0; i < result.size(); i++) {
uint32_t j = i * 4;
result[i] = fnv64(fnv64(fnv64(mix[j], mix[j + 1]), mix[j + 2]), mix[j + 3]);
}
}
} // quarkchain
} // org
extern "C" void *cache_create(uint64_t *cache_ptr,
uint32_t cache_size) {
ordered_set_t *oset = new ordered_set_t();
for (uint32_t i = 0; i < cache_size; i++) {
oset->insert(cache_ptr[i]);
}
return oset;
}
extern "C" void cache_destroy(void *ptr) {
ordered_set_t *oset = (ordered_set_t *)ptr;
delete oset;
}
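/*
 * Added note: a hedged sketch of how the extern "C" entry points fit together from a
 * host mining loop; the variable names are illustrative and the meanings are inferred
 * from the code in this file, not from an external spec:
 *   void *cache = cache_create(cache_words, cache_word_count); // build the ordered set once
 *   while (mining) {
 *     int32_t attempts = qkc_hash(cache, result, target, header, start_nonce, blocks, threads);
 *     // result[4] != 0 signals success; result[5] is the winning nonce's offset from
 *     // start_nonce and result[0..3] hold the compressed mix digest.
 *     start_nonce += attempts;
 *   }
 *   cache_destroy(cache);
 */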
bool device_init[64] = {};
uint64_t result0[6];
uint64_t *d_result0[64];
//uint64_t *d_seed_c[64];
org::quarkchain::cuoset *items[64];
//uint64_t *d_oset_raw[64];
uint64_t *found[64];
uint64_t *d_found[64];
uint64_t *d_target[64];
uint8_t *d_header[64];
hipEvent_t kernelFinished[64];
int num_devices = 0;
extern "C" int32_t qkc_hash(void *cache_ptr,
//uint64_t* seed_ptr,
uint64_t* result_ptr,
uint64_t* target,
uint64_t* header,
uint64_t start_nonce,
uint32_t blocks,
uint32_t threads) {
ordered_set_t *oset = (ordered_set_t *)cache_ptr;
//printf("c start_nonce: %lu\n", start_nonce);
checkCudaErrors(hipGetDeviceCount(&num_devices));
//ordered_set_t noset(*oset);*/
//std::array<uint64_t, 8> seed;
//std::array<uint64_t, 4> result;
//std::copy(seed_ptr, seed_ptr + seed.size(), seed.begin());
/*
org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(oset->find_by_order(0));
for (int i = 1; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
item->value = *(oset->find_by_order(i));
prev_item->next = item;
prev_item = item;
}*/
//org::quarkchain::qkc_hash<<<1,1>>>(coset, seed_ptr, result_ptr);
//std::copy(result.begin(), result.end(), result_ptr);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
uint64_t oset_raw[org::quarkchain::INIT_SET_ENTRIES];
for (int i = 0; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
oset_raw[i] = *(oset->find_by_order(i));
}
// Prepare cuoset on host
org::quarkchain::cuoset *h_items = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES);
org::quarkchain::cuoset *h_coset = org::quarkchain::h_qkc_hash_init(oset_raw, h_items);
for (int i = 0; i < num_devices; i++) {
hipSetDevice(i);
if (!device_init[i]) {
checkCudaErrors(hipMalloc(&d_result0[i], sizeof(uint64_t)*4*blocks*threads));
checkCudaErrors(hipMalloc(&items[i], sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES*blocks*threads));
checkCudaErrors(hipMalloc(&d_target[i], sizeof(uint64_t)*4));
checkCudaErrors(hipMalloc(&d_header[i], sizeof(uint8_t)*32));
//checkCudaErrors(hipMalloc(&d_oset_raw[i], org::quarkchain::INIT_SET_ENTRIES*sizeof(uint64_t)));
checkCudaErrors(hipHostMalloc((void**)&(found[i]), sizeof(uint64_t)*6, hipHostMallocMapped));
checkCudaErrors(hipHostGetDevicePointer((void**)&(d_found[i]), found[i], 0));
hipDeviceSetLimit(hipLimitStackSize, 8192);
size_t size_heap, size_stack;
hipDeviceGetLimit(&size_heap, hipLimitMallocHeapSize);
hipDeviceGetLimit(&size_stack, hipLimitStackSize);
printf("Heap size found to be %d; Stack size found to be %d\n",(int)size_heap,(int)size_stack);
checkCudaErrors(hipEventCreate(&kernelFinished[i]));
device_init[i] = true;
printf("Initialized device %d\n", i);
}
for (int j = 0; j < 6; j++) {
uint64_t *fo = found[i];
fo[j] = 0;
}
checkCudaErrors(hipMemcpy(d_target[i], target, sizeof(uint64_t)*4, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_header[i], header, sizeof(uint8_t)*32, hipMemcpyHostToDevice));
// Copy cuoset to GPU
checkCudaErrors(hipMemcpy(items[i], h_items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( org::quarkchain::qkc_hash_init_copy), dim3(blocks),dim3(threads), 0, 0, items[i], h_coset-h_items, org::quarkchain::h_oset_size);
// Calculate hashes
hipLaunchKernelGGL(( org::quarkchain::qkc_hash), dim3(blocks),dim3(threads), 0, 0, d_result0[i], items[i], d_found[i], d_target[i], d_header[i], start_nonce + i*blocks*threads, i, blocks*threads);
checkCudaErrors(hipEventRecord(kernelFinished[i]));
}
while (true) {
usleep(10);
uint8_t success = 0;
for (int i = 0; i < num_devices; i++) {
hipSetDevice(i);
if (hipEventQuery(kernelFinished[i]) != hipSuccess) {
uint64_t *fo = found[i];
if (fo[4] > 0) {
printf("Found: ");
for (int j = 0; j < 4; j++) {
printf("%16lx ", fo[j]);
}
printf("\n");
memcpy(result_ptr, fo, 6*sizeof(uint64_t));
free(h_items); // don't leak the host-side tree copy on the early-return path
return num_devices * blocks * threads; // Early return when result found, to reduce orphans
//break;
}
} else {
success++;
}
}
if (success >= num_devices) {
// All GPUs completed
break;
}
}
for (int i = 0; i < num_devices; i++) {
hipSetDevice(i);
checkCudaErrors(hipDeviceSynchronize());
uint64_t *fo = found[i];
if (fo[4] > 0) {
printf("Found: ");
for (int j = 0; j < 4; j++) {
printf("%16lx ", fo[j]);
}
printf("\n");
memcpy(result_ptr, fo, 6*sizeof(uint64_t));
free(h_items); // don't leak the host-side tree copy on the early-return path
return num_devices * blocks * threads; // Early return when result found, to reduce orphans
}
// Copy results
checkCudaErrors(hipDeviceSynchronize());
}
free(h_items);
gettimeofday(&tv2, NULL);
unsigned long utime1 = 1000000 * tv1.tv_sec + tv1.tv_usec;
unsigned long utime2 = 1000000 * tv2.tv_sec + tv2.tv_usec;
unsigned long udiff1 = utime2-utime1;
double cudaseconds = udiff1 / 1000.0 / 1000.0;
printf("Hashrate: %5.2f H/s\n", num_devices * blocks*threads / cudaseconds);
return num_devices * blocks * threads;
}
void test_sorted_list(int blocks, int threads) {
std::cout << "Testing sorted list implementation" << std::endl;
ordered_set_t oset;
org::quarkchain::generate_init_set(
oset, 431, org::quarkchain::INIT_SET_ENTRIES);
std::vector<uint64_t> slist;
for (auto v : oset) {
slist.push_back(v);
}
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
std::array<uint64_t, 8> seed;
uint64_t seed_c[8];
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
seed_c[j] = seed[j];
}
//std::array<uint64_t, 4> result0;
uint64_t result0[4];
uint64_t *d_result0;
uint64_t *d_seed_c;
checkCudaErrors(hipMalloc(&d_result0, sizeof(uint64_t)*4));
checkCudaErrors(hipMalloc(&d_seed_c, sizeof(uint64_t)*8));
checkCudaErrors(hipMemcpy(d_seed_c, seed_c, sizeof(uint64_t)*8, hipMemcpyHostToDevice));
org::quarkchain::cuoset *items;
checkCudaErrors(hipMalloc(&items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES*blocks*threads));
std::array<uint64_t, 4> result1;
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
org::quarkchain::qkc_hash_sorted_list(slist, seed, result1);
gettimeofday(&tv2, NULL);
/*org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(oset.find_by_order(0));*/
uint64_t oset_raw[org::quarkchain::INIT_SET_ENTRIES];
uint64_t *d_oset_raw;
checkCudaErrors(hipMalloc(&d_oset_raw, org::quarkchain::INIT_SET_ENTRIES*sizeof(uint64_t)));
for (int i = 0; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
/*org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
if (item == NULL) {
printf("malloc failed, i=%d\n", i);
}
item->value = *(oset.find_by_order(i));
item->next = NULL;
prev_item->next = item;
prev_item = item;
//printf("Added item: %d, value: %lu\n", i, item->value);*/
oset_raw[i] = *(oset.find_by_order(i));
}
checkCudaErrors(hipMemcpy(d_oset_raw, oset_raw, sizeof(uint64_t)*org::quarkchain::INIT_SET_ENTRIES, hipMemcpyHostToDevice));
/*int count = 0;
org::quarkchain::cuoset *iter = coset;
for (; iter != NULL; iter = iter->next, count++);
printf("elements in cuoset: %d\n", count);*/
hipDeviceSetLimit(hipLimitStackSize, 8192);
size_t size_heap, size_stack;
hipDeviceGetLimit(&size_heap, hipLimitMallocHeapSize);
hipDeviceGetLimit(&size_stack, hipLimitStackSize);
printf("Heap size found to be %d; Stack size found to be %d\n",(int)size_heap,(int)size_stack);
printf("Starting qkc_hash\n");
struct timeval tv3, tv4, tv5, tv6, tv7;
gettimeofday(&tv7, NULL);
org::quarkchain::cuoset *h_items = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES);
org::quarkchain::cuoset *h_coset = org::quarkchain::h_qkc_hash_init(oset_raw, h_items);
checkCudaErrors(hipMemcpy(items, h_items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES, hipMemcpyHostToDevice));
gettimeofday(&tv3, NULL);
//org::quarkchain::qkc_hash_init<<<1,1>>>(d_oset_raw, items);
//checkCudaErrors(hipDeviceSynchronize());
gettimeofday(&tv6, NULL);
hipLaunchKernelGGL(( org::quarkchain::qkc_hash_init_copy), dim3(blocks),dim3(threads), 0, 0, items, h_coset-h_items, org::quarkchain::h_oset_size);
checkCudaErrors(hipDeviceSynchronize());
gettimeofday(&tv4, NULL);
printf("Waiting for device synchronize\n");
checkCudaErrors(hipDeviceSynchronize());
gettimeofday(&tv5, NULL);
printf("Device synchronized\n");
// Note: no kernel in this test writes d_result0 (the qkc_hash launch was removed),
// so result0 still holds uninitialized device memory and the comparison against the
// CPU reference below is not meaningful until that launch is restored.
checkCudaErrors(hipMemcpy(result0, d_result0, 4*sizeof(uint64_t), hipMemcpyDeviceToHost));
printf("result0 copied from device\n");
free(h_items);
unsigned long utime1 = 1000000 * tv1.tv_sec + tv1.tv_usec;
unsigned long utime2 = 1000000 * tv2.tv_sec + tv2.tv_usec;
unsigned long udiff1 = utime2-utime1;
printf("CPU Sorted list Time: %lu us\n", udiff1);
unsigned long utime3 = 1000000 * tv3.tv_sec + tv3.tv_usec;
unsigned long utime4 = 1000000 * tv4.tv_sec + tv4.tv_usec;
unsigned long utime5 = 1000000 * tv5.tv_sec + tv5.tv_usec;
unsigned long utime6 = 1000000 * tv6.tv_sec + tv6.tv_usec;
unsigned long utime7 = 1000000 * tv7.tv_sec + tv7.tv_usec;
unsigned long cpuinit = utime3-utime7;
unsigned long cudatime = utime5-utime3;
unsigned long inittime = utime4-utime3;
unsigned long init1time = utime6-utime3;
printf("CPU Init1 Time: %lu us\n", cpuinit);
printf("CUDA Init1 Time: %lu us\n", init1time);
printf("CUDA Init Time: %lu us\n", inittime);
printf("CUDA Time: %lu us\n", cudatime);
double cudaseconds = cudatime / 1000.0 / 1000.0; // floating-point division; integer division truncates sub-second runs to 0
printf("Hashrate: %5.2f H/s\n", blocks*threads / cudaseconds);
for (uint32_t i = 0; i < result1.size(); i++) {
if (result0[i] != result1[i]) {
std::cout << "Test failed" << std::endl;
return;
}
}
std::cout << "Test passed" << std::endl;
}
void test_qkc_hash_perf() {
ordered_set_t oset;
auto t_start = std::chrono::steady_clock::now();
org::quarkchain::generate_init_set(
oset, 1, org::quarkchain::INIT_SET_ENTRIES);
auto used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Generate time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
t_start = std::chrono::steady_clock::now();
ordered_set_t noset = oset;
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Copy time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
t_start = std::chrono::steady_clock::now();
uint32_t count = 1000;
uint64_t seed[8];
uint64_t result[8];
for (uint32_t i = 0; i < count; i++) {
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
}
ordered_set_t new_oset(oset);
/*
org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(new_oset.find_by_order(0));
for (int i = 1; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
item->value = *(new_oset.find_by_order(i));
prev_item->next = item;
prev_item = item;
}*/
//org::quarkchain::qkc_hash<<<1,1>>>(coset, seed, result);
}
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Duration: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
}
void test_qkc_hash_slist_perf() {
ordered_set_t oset;
auto t_start = std::chrono::steady_clock::now();
org::quarkchain::generate_init_set(
oset, 1, org::quarkchain::INIT_SET_ENTRIES);
auto used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Generate time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::vector<uint64_t> slist;
for (auto v : oset) {
slist.push_back(v);
}
t_start = std::chrono::steady_clock::now();
std::vector<uint64_t> nslist(slist);
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Copy time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
t_start = std::chrono::steady_clock::now();
uint32_t count = 1000;
std::array<uint64_t, 8> seed;
std::array<uint64_t, 4> result;
for (uint32_t i = 0; i < count; i++) {
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
}
std::vector<uint64_t> new_slist(slist);
org::quarkchain::qkc_hash_sorted_list(new_slist, seed, result);
}
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Duration: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
}
int main(int argc, char** argv) {
if (argc <= 1) {
std::cout << "Must specify command in "
"qkc_perf, slist_test, slist_perf"
<< std::endl;
return -1;
}
if (strcmp(argv[1], "qkc_perf") == 0) {
test_qkc_hash_perf();
} else if (strcmp(argv[1], "slist_perf") == 0) {
test_qkc_hash_slist_perf();
} else if (strcmp(argv[1], "slist_test") == 0) {
if (argc <= 3) {
printf("Usage: %s slist_test <blocks> <threads>\n", argv[0]);
return -1;
}
int blocks = atoi(argv[2]);
int threads = atoi(argv[3]);
test_sorted_list(blocks, threads);
} else {
std::cout << "Unrecognized command: " << argv[1] << std::endl;
return -1;
}
return 0;
}
| b6c37bd089b133bd06eba4ee540633c7c81ab346.cu | #include <climits>
#include <cstdint>
#include <cstring>
#include <array>
#include <chrono>
#include <iostream>
#include <random>
#include <set>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#include "helper_cuda.h"
#include <sys/time.h>
#include <cooperative_groups.h>
#include <unistd.h>
using namespace std;
using namespace __gnu_pbds;
#define MAX_WORKSIZE 128000
typedef
tree<
uint64_t,
null_type,
less<uint64_t>,
rb_tree_tag,
tree_order_statistics_node_update>
ordered_set_t;
namespace org {
namespace quarkchain {
__constant__ const uint32_t FNV_PRIME_32 = 0x01000193;
__constant__ const uint64_t FNV_PRIME_64 = 0x100000001b3ULL;
__constant__ const uint32_t ACCESS_ROUND = 64;
__constant__ const uint32_t INIT_SET_ENTRIES = 1024 * 64;
/* Keccak core function */
#define KECCAK_ROUNDS 24
#define ROT_01 36
#define ROT_02 3
#define ROT_03 41
#define ROT_04 18
#define ROT_05 1
#define ROT_06 44
#define ROT_07 10
#define ROT_08 45
#define ROT_09 2
#define ROT_10 62
#define ROT_11 6
#define ROT_12 43
#define ROT_13 15
#define ROT_14 61
#define ROT_15 28
#define ROT_16 55
#define ROT_17 25
#define ROT_18 21
#define ROT_19 56
#define ROT_20 27
#define ROT_21 20
#define ROT_22 39
#define ROT_23 8
#define ROT_24 14
__device__ __constant__ const uint64_t roundconstants[KECCAK_ROUNDS] = {
0x0000000000000001ULL,
0x0000000000008082ULL,
0x800000000000808aULL,
0x8000000080008000ULL,
0x000000000000808bULL,
0x0000000080000001ULL,
0x8000000080008081ULL,
0x8000000000008009ULL,
0x000000000000008aULL,
0x0000000000000088ULL,
0x0000000080008009ULL,
0x000000008000000aULL,
0x000000008000808bULL,
0x800000000000008bULL,
0x8000000000008089ULL,
0x8000000000008003ULL,
0x8000000000008002ULL,
0x8000000000000080ULL,
0x000000000000800aULL,
0x800000008000000aULL,
0x8000000080008081ULL,
0x8000000000008080ULL,
0x0000000080000001ULL,
0x8000000080008008ULL
};
#define ROL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
__device__ void keccak_function (uint64_t *state) {
short i;
/* Temporary variables to avoid indexing overhead */
uint64_t a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12;
uint64_t a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24;
uint64_t b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12;
uint64_t b13, b14, b15, b16, b17, b18, b19, b20, b21, b22, b23, b24;
uint64_t c0, c1, c2, c3, c4, d;
a0 = state[0];
a1 = state[1];
a2 = state[2];
a3 = state[3];
a4 = state[4];
a5 = state[5];
a6 = state[6];
a7 = state[7];
a8 = state[8];
a9 = state[9];
a10 = state[10];
a11 = state[11];
a12 = state[12];
a13 = state[13];
a14 = state[14];
a15 = state[15];
a16 = state[16];
a17 = state[17];
a18 = state[18];
a19 = state[19];
a20 = state[20];
a21 = state[21];
a22 = state[22];
a23 = state[23];
a24 = state[24];
for (i = 0; i < KECCAK_ROUNDS; ++i) {
/*
* Uses temporary variables and loop unrolling to
* avoid array indexing and inner loops overhead
* */
/* Prepare column parity for Theta step */
c0 = a0 ^ a5 ^ a10 ^ a15 ^ a20;
c1 = a1 ^ a6 ^ a11 ^ a16 ^ a21;
c2 = a2 ^ a7 ^ a12 ^ a17 ^ a22;
c3 = a3 ^ a8 ^ a13 ^ a18 ^ a23;
c4 = a4 ^ a9 ^ a14 ^ a19 ^ a24;
/* Theta + Rho + Pi steps */
d = c4 ^ ROL64(c1, 1);
b0 = d ^ a0;
b16 = ROL64(d ^ a5, ROT_01);
b7 = ROL64(d ^ a10, ROT_02);
b23 = ROL64(d ^ a15, ROT_03);
b14 = ROL64(d ^ a20, ROT_04);
d = c0 ^ ROL64(c2, 1);
b10 = ROL64(d ^ a1, ROT_05);
b1 = ROL64(d ^ a6, ROT_06);
b17 = ROL64(d ^ a11, ROT_07);
b8 = ROL64(d ^ a16, ROT_08);
b24 = ROL64(d ^ a21, ROT_09);
d = c1 ^ ROL64(c3, 1);
b20 = ROL64(d ^ a2, ROT_10);
b11 = ROL64(d ^ a7, ROT_11);
b2 = ROL64(d ^ a12, ROT_12);
b18 = ROL64(d ^ a17, ROT_13);
b9 = ROL64(d ^ a22, ROT_14);
d = c2 ^ ROL64(c4, 1);
b5 = ROL64(d ^ a3, ROT_15);
b21 = ROL64(d ^ a8, ROT_16);
b12 = ROL64(d ^ a13, ROT_17);
b3 = ROL64(d ^ a18, ROT_18);
b19 = ROL64(d ^ a23, ROT_19);
d = c3 ^ ROL64(c0, 1);
b15 = ROL64(d ^ a4, ROT_20);
b6 = ROL64(d ^ a9, ROT_21);
b22 = ROL64(d ^ a14, ROT_22);
b13 = ROL64(d ^ a19, ROT_23);
b4 = ROL64(d ^ a24, ROT_24);
/* Chi + Iota steps */
a0 = b0 ^ (~b1 & b2) ^ roundconstants[i];
a1 = b1 ^ (~b2 & b3);
a2 = b2 ^ (~b3 & b4);
a3 = b3 ^ (~b4 & b0);
a4 = b4 ^ (~b0 & b1);
a5 = b5 ^ (~b6 & b7);
a6 = b6 ^ (~b7 & b8);
a7 = b7 ^ (~b8 & b9);
a8 = b8 ^ (~b9 & b5);
a9 = b9 ^ (~b5 & b6);
a10 = b10 ^ (~b11 & b12);
a11 = b11 ^ (~b12 & b13);
a12 = b12 ^ (~b13 & b14);
a13 = b13 ^ (~b14 & b10);
a14 = b14 ^ (~b10 & b11);
a15 = b15 ^ (~b16 & b17);
a16 = b16 ^ (~b17 & b18);
a17 = b17 ^ (~b18 & b19);
a18 = b18 ^ (~b19 & b15);
a19 = b19 ^ (~b15 & b16);
a20 = b20 ^ (~b21 & b22);
a21 = b21 ^ (~b22 & b23);
a22 = b22 ^ (~b23 & b24);
a23 = b23 ^ (~b24 & b20);
a24 = b24 ^ (~b20 & b21);
}
state[0] = a0;
state[1] = a1;
state[2] = a2;
state[3] = a3;
state[4] = a4;
state[5] = a5;
state[6] = a6;
state[7] = a7;
state[8] = a8;
state[9] = a9;
state[10] = a10;
state[11] = a11;
state[12] = a12;
state[13] = a13;
state[14] = a14;
state[15] = a15;
state[16] = a16;
state[17] = a17;
state[18] = a18;
state[19] = a19;
state[20] = a20;
state[21] = a21;
state[22] = a22;
state[23] = a23;
state[24] = a24;
}
/*
* 32-bit FNV function
*/
__device__ __host__ uint32_t fnv32(uint32_t v1, uint32_t v2) {
return (v1 * FNV_PRIME_32) ^ v2;
}
/*
* 64-bit FNV function
*/
__device__ __host__ uint64_t fnv64(uint64_t v1, uint64_t v2) {
return (v1 * FNV_PRIME_64) ^ v2;
}
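/*
 * Added note: both helpers follow the FNV-1 ordering (multiply by the FNV prime, then
 * XOR the new input); 0x01000193 and 0x100000001b3 are the standard 32-bit and 64-bit
 * FNV primes. The compress step in qkc_hash below folds four mix words this way:
 *   result[i] = fnv64(fnv64(fnv64(mix[j], mix[j+1]), mix[j+2]), mix[j+3]);
 */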
#define LEFT(x) (items+x->left_offset)
#define RIGHT(x) (items+x->right_offset)
#define HAS_LEFT(x) (x->size_left > 0)
#define HAS_RIGHT(x) (x->size_right > 0)
/*
#define LEFT(x) (x->left)
#define RIGHT(x) (x->right)
#define HAS_LEFT(x) (x->left)
#define HAS_RIGHT(x) (x->right)
*/
typedef struct cuoset {
uint64_t value;
/*struct cuoset *left;
struct cuoset *right;*/
uint16_t left_offset;
uint16_t right_offset;
uint8_t in_use;
uint8_t height;
uint16_t size_left;
uint16_t size_right;
} cuoset;
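/*
 * Added note: cuoset is an AVL-tree node addressed by index rather than by pointer.
 * Each thread works on its own slab of INIT_SET_ENTRIES nodes ("items"), and
 * left_offset/right_offset index into that slab (see the LEFT/RIGHT macros above),
 * which keeps the per-thread trees relocatable so one host-built tree can be copied
 * verbatim into every thread's region. size_left/size_right cache subtree sizes so
 * cuoset_get() can return the p-th smallest element by order statistics.
 */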
__device__ __host__ uint32_t cuoset_size(cuoset *s);
__device__ __host__ __forceinline__ uint64_t max(uint64_t a, uint64_t b) {
return (a>b) ? a : b;
}
__device__ __host__ __forceinline__ uint8_t height(cuoset *p) {
//if (p == NULL || p->in_use == 0) return 0;
return p->height;
}
__device__ __host__ cuoset *rightRotate(cuoset *y, cuoset *items) {
cuoset *x = LEFT(y);
cuoset *T2 = RIGHT(x);
/*RIGHT(x) = y;
LEFT(y) = T2;*/
x->right_offset = y-items;
y->left_offset = T2-items;
y->size_left = x->size_right;
x->size_right = y->size_left + y->size_right + 1;
uint8_t yhl = HAS_LEFT(y) ? height(LEFT(y)) : 0;
uint8_t yhr = HAS_RIGHT(y) ? height(RIGHT(y)) : 0;
y->height = max(yhl, yhr)+1;
uint8_t xhl = HAS_LEFT(x) ? height(LEFT(x)) : 0;
uint8_t xhr = HAS_RIGHT(x) ? height(RIGHT(x)) : 0;
x->height = max(xhl, xhr)+1;
return x;
}
__device__ __host__ cuoset *leftRotate(cuoset *x, cuoset *items) {
cuoset *y = RIGHT(x);
cuoset *T2 = LEFT(y);
/*LEFT(y) = x;
RIGHT(x) = T2;*/
y->left_offset = x-items;
x->right_offset = T2-items;
x->size_right = y->size_left;
y->size_left = x->size_left + x->size_right + 1;
uint8_t xhl = HAS_LEFT(x) ? height(LEFT(x)) : 0;
uint8_t xhr = HAS_RIGHT(x) ? height(RIGHT(x)) : 0;
x->height = max(xhl, xhr)+1;
uint8_t yhl = HAS_LEFT(y) ? height(LEFT(y)) : 0;
uint8_t yhr = HAS_RIGHT(y) ? height(RIGHT(y)) : 0;
y->height = max(yhl, yhr)+1;
return y;
}
__device__ __host__ __forceinline__ int8_t getBalance(cuoset *N, cuoset *items) {
if (N == NULL || N->in_use == 0) return 0;
uint8_t hl = HAS_LEFT(N) ? height(LEFT(N)) : 0;
uint8_t hr = HAS_RIGHT(N) ? height(RIGHT(N)) : 0;
return hl-hr;
}
cuoset *h_unused_item;
uint32_t h_oset_size;
__device__ cuoset *unused_item[MAX_WORKSIZE] = {};
__device__ uint32_t oset_size[MAX_WORKSIZE] = {};
__device__ uint32_t depth = 0;
__device__ __host__ cuoset *cuoset_insert(cuoset *node, uint64_t value, uint32_t gid, cuoset *items) {
if (node == NULL || node->in_use == 0) {
#ifdef __CUDA_ARCH__
cuoset *new_node = unused_item[gid];
#else
cuoset *new_node = h_unused_item;
#endif
new_node->value = value;
/*LEFT(new_node) = NULL;
RIGHT(new_node) = NULL;*/
new_node->in_use = 1;
new_node->height = 1;
new_node->size_left = 0;
new_node->size_right = 0;
#ifdef __CUDA_ARCH__
unused_item[gid] = NULL;
oset_size[gid]++;
#else
h_unused_item = NULL;
h_oset_size++;
#endif
return new_node;
}
if (value < node->value) {
//LEFT(node) = cuoset_insert(LEFT(node), value, gid, items);
if (HAS_LEFT(node)) {
node->left_offset = cuoset_insert(LEFT(node), value, gid, items) - items;
} else {
node->left_offset = cuoset_insert(NULL, value, gid, items) - items;
}
node->size_left++;
} else if (value > node->value) {
//RIGHT(node) = cuoset_insert(RIGHT(node), value, gid, items);
if (HAS_RIGHT(node)) {
node->right_offset = cuoset_insert(RIGHT(node), value, gid, items) - items;
} else {
node->right_offset = cuoset_insert(NULL, value, gid, items) - items;
}
node->size_right++;
} else {
// Keys equal, discard insert since values need to be unique
return node;
}
uint8_t hl = 0;
if (HAS_LEFT(node)) hl = height(LEFT(node));
uint8_t hr = 0;
if (HAS_RIGHT(node)) hr = height(RIGHT(node));
node->height = 1 + max(hl, hr);
int8_t balance = getBalance(node, items);
if (balance > 1) {
uint64_t lval = LEFT(node)->value;
// Left Left case
if (value < lval) {
return rightRotate(node, items);
}
// Left Right case
if (value > lval) {
//LEFT(node) = leftRotate(LEFT(node), items);
node->left_offset = leftRotate(LEFT(node), items) - items;
return rightRotate(node, items);
}
}
if (balance < -1) {
uint64_t rval = RIGHT(node)->value;
// Right Right case
if (value > rval) {
return leftRotate(node, items);
}
// Right Left case
if (value < rval) {
//RIGHT(node) = rightRotate(RIGHT(node), items);
node->right_offset = rightRotate(RIGHT(node), items) - items;
return leftRotate(node, items);
}
}
return node;
}
__device__ __host__ cuoset *minValueNode(cuoset *node, cuoset *items) {
cuoset *current = node;
while (HAS_LEFT(current)) {
current = LEFT(current);
}
return current;
}
__device__ __host__ cuoset *cuoset_erase(cuoset *root, cuoset *item, uint32_t gid, cuoset *items) {
if (root == NULL ||root->in_use == 0 ) return root;
if (item->value < root->value) {
//LEFT(root) = cuoset_erase(LEFT(root), item, gid, items);
root->left_offset = cuoset_erase(LEFT(root), item, gid, items) - items;
root->size_left--;
} else if (item->value > root->value) {
//RIGHT(root) = cuoset_erase(RIGHT(root), item, gid, items);
root->right_offset = cuoset_erase(RIGHT(root), item, gid, items) - items;
root->size_right--;
} else {
if ( (!HAS_LEFT(root)) || (!HAS_RIGHT(root)) ) {
cuoset *temp = HAS_LEFT(root) ? LEFT(root) : RIGHT(root);
//if (temp == NULL || temp->in_use == 0) {
if (!HAS_LEFT(root) && !HAS_RIGHT(root)) {
temp = root;
root = NULL;
} else {
*root = *temp;
}
temp->in_use = 0;
temp->left_offset = 0;
temp->right_offset = 0;
temp->size_left = 0;
temp->size_right = 0;
#ifdef __CUDA_ARCH__
unused_item[gid] = temp;
oset_size[gid]--;
#else
h_unused_item = temp;
h_oset_size--;
#endif
} else {
cuoset *temp = minValueNode(RIGHT(root), items);
root->value = temp->value;
//RIGHT(root) = cuoset_erase(RIGHT(root), temp, gid, items);
root->right_offset = cuoset_erase(RIGHT(root), temp, gid, items) - items;
root->size_right -= 1;
}
}
if (root == NULL) return root;
//root->height = 1 + max(height(LEFT(root)), height(RIGHT(root)));
uint8_t hl = 0;
if (HAS_LEFT(root)) hl = height(LEFT(root));
uint8_t hr = 0;
if (HAS_RIGHT(root)) hr = height(RIGHT(root));
root->height = 1 + max(hl, hr);
int8_t balance = getBalance(root, items);
if (balance > 1) {
int8_t bl = getBalance(LEFT(root), items);
// Left Left case
if (bl >= 0) {
return rightRotate(root, items);
}
// Left Right case
if (bl < 0) {
//LEFT(root) = leftRotate(LEFT(root), items);
root->left_offset = leftRotate(LEFT(root), items) - items;
return rightRotate(root, items);
}
}
if (balance < -1) {
int8_t br = getBalance(RIGHT(root), items);
// Right Right case
if (br <= 0) {
return leftRotate(root, items);
}
// Right Left case
if (br > 0) {
//RIGHT(root) = rightRotate(RIGHT(root), items);
root->right_offset = rightRotate(RIGHT(root), items) - items;
return leftRotate(root, items);
}
}
return root;
}
/*
__device__ __host__ uint32_t cuoset_size(cuoset *s) {
if (s == NULL) return 0;
uint32_t size = 1+cuoset_size(LEFT(s))+cuoset_size(RIGHT(s));
return size;
}
*/
__device__ cuoset* cuoset_get(cuoset* s, uint32_t p, cuoset *items) {
while (s != NULL && s->in_use) {
uint32_t size_l = s->size_left;
if (p == size_l) {
return s;
} else if (p < size_l) {
s = LEFT(s);
} else {
s = RIGHT(s);
p = p-(size_l+1);
}
}
return NULL;
}
/*
* A simplified version of generating initial set.
* A more secure way is to use the cache generation in eth.
*/
void generate_init_set(ordered_set_t& oset, uint64_t seed, uint32_t size) {
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(seed);
for (uint32_t i = 0; i < size; i++) {
uint64_t v = dist(generator);
oset.insert(v);
}
}
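/*
 * Added note: a minimal host-side usage sketch (mirrors test_sorted_list below):
 *   ordered_set_t oset;
 *   org::quarkchain::generate_init_set(oset, seed, org::quarkchain::INIT_SET_ENTRIES);
 *   // oset now holds pseudo-random 64-bit values (duplicates, if any, are absorbed
 *   // by the set), ready to be flattened with find_by_order().
 */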
__device__ cuoset *d_coset[MAX_WORKSIZE];
__host__ cuoset *h_qkc_hash_init(uint64_t *oset_raw, cuoset *items) {
cuoset *coset = NULL;
cuoset **unused = &(h_unused_item);
h_oset_size = 0;
for (int i = 0; i < INIT_SET_ENTRIES; i++) {
*unused = &(items[i]);
(*unused)->in_use = 0;
coset = cuoset_insert(coset, oset_raw[i], 0, items);
}
//printf("Initialized on host. coset offset: %lu\n", coset-items);
return coset;
}
__global__ void qkc_hash_init(uint64_t *oset_raw, cuoset *items_all) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *coset = d_coset[gid];
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
if (gid == 0) {
cuoset **unused = &(unused_item[gid]);
oset_size[gid] = 0;
for (int i = 0; i < INIT_SET_ENTRIES; i++) {
*unused = &(items[i]);
(*unused)->in_use = 0;
d_coset[0] = cuoset_insert(d_coset[0], oset_raw[i], gid, items);
}
}
}
__global__ void qkc_hash_init_copy(cuoset *items_all, uint16_t offset, uint32_t o_size) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
if (gid > 0) {
for (int i = 0; i < INIT_SET_ENTRIES*sizeof(cuoset)/sizeof(uint64_t); i++) {
uint64_t tmp = ((uint64_t*)(items_all))[i];
((uint64_t*)(items))[i] = tmp;
}
}
//uint16_t offset = d_coset[0] - items_all;
d_coset[gid] = items + offset;
//oset_size[gid] = oset_size[0];
oset_size[gid] = o_size;
unused_item[gid] = NULL;
}
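/*
 * Added note: the initial ordered set is built once on the host (h_qkc_hash_init),
 * copied into thread 0's slab, and qkc_hash_init_copy then replicates that slab into
 * every other thread's region. Because nodes are linked by offsets, only the root
 * offset ("offset") and the element count ("o_size") need to be passed along; each
 * thread's root becomes items + gid*INIT_SET_ENTRIES + offset.
 */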
#define swap64(x) \
((uint64_t)((((uint64_t)(x)) >> 56) | \
(((uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \
(((uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \
(((uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \
(((uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \
(((uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \
(((uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \
(((uint64_t)(x)) << 56)))
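/*
 * Added note: swap64 reverses the byte order of a 64-bit word. The 256-bit hash and
 * target are treated as big-endian byte strings, so swapping the first word of each
 * lets a plain uint64 "<=" act as a big-endian comparison of their leading 8 bytes
 * in the difficulty check below.
 */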
/*
* QKC hash using ordered set.
*/
//#define DEBUG
#define MIX_SIZE 16
#define SEED_SIZE 8
#define RESULT_SIZE 4
__global__ void qkc_hash(
uint64_t *result_out,
cuoset *items_all,
uint64_t *found,
uint64_t *target,
uint8_t *header,
uint64_t start_nonce,
uint8_t devid,
uint32_t worksize) {
uint32_t gid = blockIdx.x * blockDim.x + threadIdx.x;
cuoset *coset = d_coset[gid];
cuoset *items = items_all + gid*INIT_SET_ENTRIES;
#ifdef DEBUG
if (gid == 0) {
printf("Target %16lx %16lx %16lx %16lx\n", swap64(target[0]), swap64(target[1]), swap64(target[2]), swap64(target[3]));
}
#endif
/* Calculate SHA3_512 */
uint64_t seed_hash[SEED_SIZE];
{
const int rsize = 72;
const int rsize_byte = 9;
uint64_t state[25];
uint8_t temp[144];
const uint32_t msglen = 40;
memset(state, 0, sizeof(state));
for (int i = 0; i < 4; i++) {
state[i] = (((uint64_t*)header)[i]);
}
state[4] = (start_nonce) + gid;
#ifdef DEBUG
if (gid == 10) {
printf("CUDA sha3_512( ");
for (int i = 0; i < 40; i++) {
printf("%02x ", ((uint8_t*)state)[i]);
}
printf(" )\n");
}
#endif
// Padding
memcpy(temp, state, msglen);
memset(temp+msglen, 0, rsize-msglen);
//temp[msglen] = 0x06;
temp[msglen] = 0x01;
temp[rsize - 1] |= 0x80;
/* Absorb */
for (int i = 0; i < rsize_byte; i++) {
state[i] = ((uint64_t*)temp)[i];
}
#ifdef DEBUG
if (gid == 10) {
printf("CUDA sha3_512 state1: ");
for (int i = 0; i < rsize; i++) {
printf("%02x ", ((uint8_t*)state)[i]);
}
printf("\n");
}
#endif
keccak_function(state);
/* Squeeze */
seed_hash[0] = state[0];
seed_hash[1] = state[1];
seed_hash[2] = state[2];
seed_hash[3] = state[3];
seed_hash[4] = state[4];
seed_hash[5] = state[5];
seed_hash[6] = state[6];
seed_hash[7] = state[7];
#ifdef DEBUG
if (gid == 10) {
printf("CPU Seed Hash: ");
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed_init)[i]);
}
printf("\n");
printf("CUDA Seed Hash: ");
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed_hash)[i]);
}
printf("\n");
}
#endif
}
uint64_t seed[SEED_SIZE];
for (int i = 0; i < SEED_SIZE; i++) {
//seed[i] = seed_init[i];
seed[i] = seed_hash[i];
}
uint64_t mix[16];
for (uint32_t i = 0; i < MIX_SIZE; i++) {
mix[i] = seed[i % SEED_SIZE];
}
for (uint32_t i = 0; i < ACCESS_ROUND; i ++) {
uint64_t new_data[16];
uint64_t p = fnv64(i ^ seed[0], mix[i % MIX_SIZE]);
for (uint32_t j = 0; j < MIX_SIZE; j++) {
// Find the pth element and remove it
cuoset *it = cuoset_get(coset, p % oset_size[gid], items);
new_data[j] = it->value;
coset = cuoset_erase(coset, it, gid, items);
// Generate random data and insert it
p = fnv64(p, new_data[j]);
coset = cuoset_insert(coset, p, gid, items);
// Find the next element index (ordered)
p = fnv64(p, new_data[j]);
}
for (uint32_t j = 0; j < MIX_SIZE; j++) {
mix[j] = fnv64(mix[j], new_data[j]);
}
}
/*
* Compress
*/
uint64_t result[RESULT_SIZE];
for (uint32_t i = 0; i < RESULT_SIZE; i++) {
uint32_t j = i * 4;
result[i] = fnv64(fnv64(fnv64(mix[j], mix[j + 1]), mix[j + 2]), mix[j + 3]);
}
/* Calculate SHA3_256 */
uint64_t hash[4];
{
const int rsize = 136;
const int rsize_byte = 17;
uint64_t state[25];
uint8_t temp[144];
const uint32_t msglen = 96;
memset(state, 0, sizeof(state));
for (int i = 0; i < 8; i++) {
state[i] = seed[i];
}
state[ 8] = result[0];
state[ 9] = result[1];
state[10] = result[2];
state[11] = result[3];
// Padding
memcpy(temp, state, msglen);
memset(temp+msglen, 0, rsize-msglen);
temp[msglen] = 0x01;
temp[rsize - 1] |= 0x80;
/* Absorb */
for (int i = 0; i < rsize_byte; i++) {
state[i] = ((uint64_t*)temp)[i];
}
keccak_function(state);
/* Squeeze */
hash[0] = state[0];
hash[1] = state[1];
hash[2] = state[2];
hash[3] = state[3];
}
if (swap64(hash[0]) <= swap64(target[0])) {
#ifdef DEBUG
printf("%16lx < %16lx\n", swap64(hash[0]), swap64(target[0]));
printf("CUDA Solve (devid: %d, gid: %d)! ", devid, gid);
for (int i = 0; i < 32; i++) {
printf("%02x, ", ((uint8_t*)hash)[i]);
}
printf("\n");
printf("CUDA Solve (devid: %d, gid: %d) Result: ", devid, gid);
for (int i = 0; i < 32; i++) {
printf("%02x, ", ((uint8_t*)result)[i]);
}
printf("\n");
printf("CUDA Solve Seed Hash (devid: %d, gid: %d): ", devid, gid);
for (int i = 0; i < SEED_SIZE*8; i++) {
printf("%02x ", ((uint8_t*)seed)[i]);
}
printf("\n");
#endif
if (!found[4]) {
found[0] = result[0];
found[1] = result[1];
found[2] = result[2];
found[3] = result[3];
found[5] = gid + devid*worksize;
found[4] = 1;
}
}
//memcpy(result_out+gid*RESULT_SIZE, result, RESULT_SIZE*sizeof(uint64_t));
}
void qkc_hash_sorted_list(
std::vector<uint64_t>& slist,
std::array<uint64_t, 8>& seed,
std::array<uint64_t, 4>& result) {
std::array<uint64_t, 16> mix;
for (uint32_t i = 0; i < mix.size(); i++) {
mix[i] = seed[i % seed.size()];
}
for (uint32_t i = 0; i < ACCESS_ROUND; i ++) {
std::array<uint64_t, 16> new_data;
uint64_t p = fnv64(i ^ seed[0], mix[i % mix.size()]);
for (uint32_t j = 0; j < mix.size(); j++) {
// Find the pth element and remove it
uint32_t idx = p % slist.size();
new_data[j] = slist[idx];
slist.erase(slist.begin() + idx);
// Generate random data and insert it
// if the vector doesn't contain it.
p = fnv64(p, new_data[j]);
auto it = std::lower_bound(slist.begin(), slist.end(), p);
if (it == slist.end() || *it != p) {
slist.insert(it, p);
}
#ifdef DEBUG
if (i == 60) {
printf("access %d, mix %d: get value=%lu, insert value=%lu\n", i, j, new_data[j], p);
}
#endif
// Find the next element index (ordered)
p = fnv64(p, new_data[j]);
}
for (uint32_t j = 0; j < mix.size(); j++) {
mix[j] = fnv64(mix[j], new_data[j]);
}
}
/*
* Compress
*/
for (uint32_t i = 0; i < result.size(); i++) {
uint32_t j = i * 4;
result[i] = fnv64(fnv64(fnv64(mix[j], mix[j + 1]), mix[j + 2]), mix[j + 3]);
}
}
} // quarkchain
} // org
extern "C" void *cache_create(uint64_t *cache_ptr,
uint32_t cache_size) {
ordered_set_t *oset = new ordered_set_t();
for (uint32_t i = 0; i < cache_size; i++) {
oset->insert(cache_ptr[i]);
}
return oset;
}
extern "C" void cache_destroy(void *ptr) {
ordered_set_t *oset = (ordered_set_t *)ptr;
delete oset;
}
bool device_init[64] = {};
uint64_t result0[6];
uint64_t *d_result0[64];
//uint64_t *d_seed_c[64];
org::quarkchain::cuoset *items[64];
//uint64_t *d_oset_raw[64];
uint64_t *found[64];
uint64_t *d_found[64];
uint64_t *d_target[64];
uint8_t *d_header[64];
cudaEvent_t kernelFinished[64];
int num_devices = 0;
extern "C" int32_t qkc_hash(void *cache_ptr,
//uint64_t* seed_ptr,
uint64_t* result_ptr,
uint64_t* target,
uint64_t* header,
uint64_t start_nonce,
uint32_t blocks,
uint32_t threads) {
ordered_set_t *oset = (ordered_set_t *)cache_ptr;
//printf("c start_nonce: %lu\n", start_nonce);
checkCudaErrors(cudaGetDeviceCount(&num_devices));
//ordered_set_t noset(*oset);*/
//std::array<uint64_t, 8> seed;
//std::array<uint64_t, 4> result;
//std::copy(seed_ptr, seed_ptr + seed.size(), seed.begin());
/*
org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(oset->find_by_order(0));
for (int i = 1; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
item->value = *(oset->find_by_order(i));
prev_item->next = item;
prev_item = item;
}*/
//org::quarkchain::qkc_hash<<<1,1>>>(coset, seed_ptr, result_ptr);
//std::copy(result.begin(), result.end(), result_ptr);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
uint64_t oset_raw[org::quarkchain::INIT_SET_ENTRIES];
for (int i = 0; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
oset_raw[i] = *(oset->find_by_order(i));
}
// Prepare cuoset on host
org::quarkchain::cuoset *h_items = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES);
org::quarkchain::cuoset *h_coset = org::quarkchain::h_qkc_hash_init(oset_raw, h_items);
for (int i = 0; i < num_devices; i++) {
cudaSetDevice(i);
if (!device_init[i]) {
checkCudaErrors(cudaMalloc(&d_result0[i], sizeof(uint64_t)*4*blocks*threads));
checkCudaErrors(cudaMalloc(&items[i], sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES*blocks*threads));
checkCudaErrors(cudaMalloc(&d_target[i], sizeof(uint64_t)*4));
checkCudaErrors(cudaMalloc(&d_header[i], sizeof(uint8_t)*32));
//checkCudaErrors(cudaMalloc(&d_oset_raw[i], org::quarkchain::INIT_SET_ENTRIES*sizeof(uint64_t)));
checkCudaErrors(cudaHostAlloc((void**)&(found[i]), sizeof(uint64_t)*6, cudaHostAllocMapped));
checkCudaErrors(cudaHostGetDevicePointer((void**)&(d_found[i]), found[i], 0));
cudaDeviceSetLimit(cudaLimitStackSize, 8192);
size_t size_heap, size_stack;
cudaDeviceGetLimit(&size_heap, cudaLimitMallocHeapSize);
cudaDeviceGetLimit(&size_stack, cudaLimitStackSize);
printf("Heap size found to be %d; Stack size found to be %d\n",(int)size_heap,(int)size_stack);
checkCudaErrors(cudaEventCreate(&kernelFinished[i]));
device_init[i] = true;
printf("Initialized device %d\n", i);
}
for (int j = 0; j < 6; j++) {
uint64_t *fo = found[i];
fo[j] = 0;
}
checkCudaErrors(cudaMemcpy(d_target[i], target, sizeof(uint64_t)*4, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_header[i], header, sizeof(uint8_t)*32, cudaMemcpyHostToDevice));
// Copy cuoset to GPU
checkCudaErrors(cudaMemcpy(items[i], h_items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES, cudaMemcpyHostToDevice));
org::quarkchain::qkc_hash_init_copy<<<blocks,threads>>>(items[i], h_coset-h_items, org::quarkchain::h_oset_size);
// Calculate hashes
org::quarkchain::qkc_hash<<<blocks,threads>>>(d_result0[i], items[i], d_found[i], d_target[i], d_header[i], start_nonce + i*blocks*threads, i, blocks*threads);
checkCudaErrors(cudaEventRecord(kernelFinished[i]));
}
while (true) {
usleep(10);
uint8_t success = 0;
for (int i = 0; i < num_devices; i++) {
cudaSetDevice(i);
if (cudaEventQuery(kernelFinished[i]) != cudaSuccess) {
uint64_t *fo = found[i];
if (fo[4] > 0) {
printf("Found: ");
for (int j = 0; j < 4; j++) {
printf("%16lx ", fo[j]);
}
printf("\n");
memcpy(result_ptr, fo, 6*sizeof(uint64_t));
free(h_items); // don't leak the host-side tree copy on the early-return path
return num_devices * blocks * threads; // Early return when result found, to reduce orphans
//break;
}
} else {
success++;
}
}
if (success >= num_devices) {
// All GPUs completed
break;
}
}
for (int i = 0; i < num_devices; i++) {
cudaSetDevice(i);
checkCudaErrors(cudaDeviceSynchronize());
uint64_t *fo = found[i];
if (fo[4] > 0) {
printf("Found: ");
for (int j = 0; j < 4; j++) {
printf("%16lx ", fo[j]);
}
printf("\n");
memcpy(result_ptr, fo, 6*sizeof(uint64_t));
free(h_items); // don't leak the host-side tree copy on the early-return path
return num_devices * blocks * threads; // Early return when result found, to reduce orphans
}
// Copy results
checkCudaErrors(cudaDeviceSynchronize());
}
free(h_items);
gettimeofday(&tv2, NULL);
unsigned long utime1 = 1000000 * tv1.tv_sec + tv1.tv_usec;
unsigned long utime2 = 1000000 * tv2.tv_sec + tv2.tv_usec;
unsigned long udiff1 = utime2-utime1;
double cudaseconds = udiff1 / 1000.0 / 1000.0;
printf("Hashrate: %5.2f H/s\n", num_devices * blocks*threads / cudaseconds);
return num_devices * blocks * threads;
}
void test_sorted_list(int blocks, int threads) {
std::cout << "Testing sorted list implementation" << std::endl;
ordered_set_t oset;
org::quarkchain::generate_init_set(
oset, 431, org::quarkchain::INIT_SET_ENTRIES);
std::vector<uint64_t> slist;
for (auto v : oset) {
slist.push_back(v);
}
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
std::array<uint64_t, 8> seed;
uint64_t seed_c[8];
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
seed_c[j] = seed[j];
}
//std::array<uint64_t, 4> result0;
uint64_t result0[4];
uint64_t *d_result0;
uint64_t *d_seed_c;
checkCudaErrors(cudaMalloc(&d_result0, sizeof(uint64_t)*4));
checkCudaErrors(cudaMalloc(&d_seed_c, sizeof(uint64_t)*8));
checkCudaErrors(cudaMemcpy(d_seed_c, seed_c, sizeof(uint64_t)*8, cudaMemcpyHostToDevice));
org::quarkchain::cuoset *items;
checkCudaErrors(cudaMalloc(&items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES*blocks*threads));
std::array<uint64_t, 4> result1;
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
org::quarkchain::qkc_hash_sorted_list(slist, seed, result1);
gettimeofday(&tv2, NULL);
/*org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(oset.find_by_order(0));*/
uint64_t oset_raw[org::quarkchain::INIT_SET_ENTRIES];
uint64_t *d_oset_raw;
checkCudaErrors(cudaMalloc(&d_oset_raw, org::quarkchain::INIT_SET_ENTRIES*sizeof(uint64_t)));
for (int i = 0; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
/*org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
if (item == NULL) {
printf("malloc failed, i=%d\n", i);
}
item->value = *(oset.find_by_order(i));
item->next = NULL;
prev_item->next = item;
prev_item = item;
//printf("Added item: %d, value: %lu\n", i, item->value);*/
oset_raw[i] = *(oset.find_by_order(i));
}
checkCudaErrors(cudaMemcpy(d_oset_raw, oset_raw, sizeof(uint64_t)*org::quarkchain::INIT_SET_ENTRIES, cudaMemcpyHostToDevice));
/*int count = 0;
org::quarkchain::cuoset *iter = coset;
for (; iter != NULL; iter = iter->next, count++);
printf("elements in cuoset: %d\n", count);*/
cudaDeviceSetLimit(cudaLimitStackSize, 8192);
size_t size_heap, size_stack;
cudaDeviceGetLimit(&size_heap, cudaLimitMallocHeapSize);
cudaDeviceGetLimit(&size_stack, cudaLimitStackSize);
printf("Heap size found to be %d; Stack size found to be %d\n",(int)size_heap,(int)size_stack);
printf("Starting qkc_hash\n");
struct timeval tv3, tv4, tv5, tv6, tv7;
gettimeofday(&tv7, NULL);
org::quarkchain::cuoset *h_items = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES);
org::quarkchain::cuoset *h_coset = org::quarkchain::h_qkc_hash_init(oset_raw, h_items);
checkCudaErrors(cudaMemcpy(items, h_items, sizeof(org::quarkchain::cuoset)*org::quarkchain::INIT_SET_ENTRIES, cudaMemcpyHostToDevice));
gettimeofday(&tv3, NULL);
//org::quarkchain::qkc_hash_init<<<1,1>>>(d_oset_raw, items);
//checkCudaErrors(cudaDeviceSynchronize());
gettimeofday(&tv6, NULL);
org::quarkchain::qkc_hash_init_copy<<<blocks,threads>>>(items, h_coset-h_items, org::quarkchain::h_oset_size);
checkCudaErrors(cudaDeviceSynchronize());
gettimeofday(&tv4, NULL);
printf("Waiting for device synchronize\n");
checkCudaErrors(cudaDeviceSynchronize());
gettimeofday(&tv5, NULL);
printf("Device synchronized\n");
// Note: no kernel in this test writes d_result0 (the qkc_hash launch was removed),
// so the comparison against the CPU reference below checks uninitialized data.
checkCudaErrors(cudaMemcpy(result0, d_result0, 4*sizeof(uint64_t), cudaMemcpyDeviceToHost));
printf("result0 copied from device\n");
free(h_items);
unsigned long utime1 = 1000000 * tv1.tv_sec + tv1.tv_usec;
unsigned long utime2 = 1000000 * tv2.tv_sec + tv2.tv_usec;
unsigned long udiff1 = utime2-utime1;
printf("CPU Sorted list Time: %lu us\n", udiff1);
unsigned long utime3 = 1000000 * tv3.tv_sec + tv3.tv_usec;
unsigned long utime4 = 1000000 * tv4.tv_sec + tv4.tv_usec;
unsigned long utime5 = 1000000 * tv5.tv_sec + tv5.tv_usec;
unsigned long utime6 = 1000000 * tv6.tv_sec + tv6.tv_usec;
unsigned long utime7 = 1000000 * tv7.tv_sec + tv7.tv_usec;
unsigned long cpuinit = utime3-utime7;
unsigned long cudatime = utime5-utime3;
unsigned long inittime = utime4-utime3;
unsigned long init1time = utime6-utime3;
printf("CPU Init1 Time: %lu us\n", cpuinit);
printf("CUDA Init1 Time: %lu us\n", init1time);
printf("CUDA Init Time: %lu us\n", inittime);
printf("CUDA Time: %lu us\n", cudatime);
double cudaseconds = cudatime / 1000.0 / 1000.0; // floating-point division; integer division truncates sub-second runs to 0
printf("Hashrate: %5.2f H/s\n", blocks*threads / cudaseconds);
for (uint32_t i = 0; i < result1.size(); i++) {
if (result0[i] != result1[i]) {
std::cout << "Test failed" << std::endl;
return;
}
}
std::cout << "Test passed" << std::endl;
}
void test_qkc_hash_perf() {
ordered_set_t oset;
auto t_start = std::chrono::steady_clock::now();
org::quarkchain::generate_init_set(
oset, 1, org::quarkchain::INIT_SET_ENTRIES);
auto used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Generate time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
t_start = std::chrono::steady_clock::now();
ordered_set_t noset = oset;
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Copy time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
t_start = std::chrono::steady_clock::now();
uint32_t count = 1000;
uint64_t seed[8];
uint64_t result[8];
for (uint32_t i = 0; i < count; i++) {
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
}
ordered_set_t new_oset(oset);
/*
org::quarkchain::cuoset *coset = (org::quarkchain::cuoset *)malloc(sizeof(org::quarkchain::cuoset));
org::quarkchain::cuoset *prev_item = coset;
prev_item->value = *(new_oset.find_by_order(0));
for (int i = 1; i < org::quarkchain::INIT_SET_ENTRIES; i++) {
org::quarkchain::cuoset *item = (org::quarkchain::cuoset*)malloc(sizeof(org::quarkchain::cuoset));
item->value = *(new_oset.find_by_order(i));
prev_item->next = item;
prev_item = item;
}*/
//org::quarkchain::qkc_hash<<<1,1>>>(coset, seed, result);
}
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Duration: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
}
void test_qkc_hash_slist_perf() {
ordered_set_t oset;
auto t_start = std::chrono::steady_clock::now();
org::quarkchain::generate_init_set(
oset, 1, org::quarkchain::INIT_SET_ENTRIES);
auto used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Generate time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::vector<uint64_t> slist;
for (auto v : oset) {
slist.push_back(v);
}
t_start = std::chrono::steady_clock::now();
std::vector<uint64_t> nslist(slist);
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Copy time: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
std::uniform_int_distribution<uint64_t> dist(0, ULLONG_MAX);
std::default_random_engine generator(475);
t_start = std::chrono::steady_clock::now();
uint32_t count = 1000;
std::array<uint64_t, 8> seed;
std::array<uint64_t, 4> result;
for (uint32_t i = 0; i < count; i++) {
for (uint32_t j = 0; j < 8; j++) {
seed[j] = dist(generator);
}
std::vector<uint64_t> new_slist(slist);
org::quarkchain::qkc_hash_sorted_list(new_slist, seed, result);
}
used_time = std::chrono::steady_clock::now() - t_start;
std::cout << "Duration: "
<< std::chrono::duration<double, std::milli>(used_time).count()
<< std::endl;
}
int main(int argc, char** argv) {
if (argc <= 1) {
std::cout << "Must specify command in "
"qkc_perf, slist_test, slist_perf"
<< std::endl;
return -1;
}
if (strcmp(argv[1], "qkc_perf") == 0) {
test_qkc_hash_perf();
} else if (strcmp(argv[1], "slist_perf") == 0) {
test_qkc_hash_slist_perf();
} else if (strcmp(argv[1], "slist_test") == 0) {
if (argc <= 3) {
printf("Usage: %s slist_test <blocks> <threads>\n", argv[0]);
return -1;
}
int blocks = atoi(argv[2]);
int threads = atoi(argv[3]);
test_sorted_list(blocks, threads);
} else {
std::cout << "Unrecognized command: " << argv[1] << std::endl;
return -1;
}
return 0;
}
|
82e5bda89d5c369fad27c1d0077fb2739f5d736d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Delaunay.h"
////////////////////////////////////////////////
// DRAW TRIANGLE RELATED //
////////////////////////////////////////////////
__device__ int lower_div(int a, int b) {
if (a < 0) { return (a + 1) / b - 1; }
else { return a / b; }
}
__device__ int upper_div(int a, int b) {
if (a > 0) { return (a - 1) / b + 1; }
else { return a / b; }
}
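/*
 * Added note: lower_div/upper_div are floor and ceiling integer division for the
 * (possibly negative) numerators produced while rasterizing triangle edges, e.g.
 *   lower_div(-3, 2) == -2 (floor of -1.5)   upper_div(-3, 2) == -1 (ceil of -1.5)
 *   lower_div( 3, 2) ==  1                   upper_div( 3, 2) ==  2
 * Plain C integer division would truncate toward zero instead.
 */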
__global__ void draw_triangles(float** map, int* extrema_x, int* extrema_y, float* extrema_value, int* triangles, const int* count_list) {
int points[3];
float a[3]; // a0*x + a1*y + a2 = z
int max_height;
bool clockwise;
int *left_bound, *right_bound;
int index;
for (index = 0; index < count_list[COUNT_TRI]; index++) {
sort_points(triangles, index, extrema_y, points);
find_direction(extrema_x, extrema_y, points, &clockwise);
max_height = extrema_y[points[0]] - extrema_y[points[2]];
left_bound = (int*)malloc((max_height + 1) * sizeof(int));
right_bound = (int*)malloc((max_height + 1) * sizeof(int));
int i, j, h, d, x0;
// right bound
if (clockwise) {
h = max_height;
d = extrema_x[points[0]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
if (h == 0) { printf("tiangles: %d) h = 0\n", index); asm("trap;"); }
for (i = 0; i <= h; i++) {
right_bound[i] = lower_div(d*i, h) + x0;
}
}
else {
h = extrema_y[points[1]] - extrema_y[points[2]];
d = extrema_x[points[1]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
i = 0;
if (h == 0) {
right_bound[i] = extrema_x[points[1]];
i++;
}
else {
for (; i <= h; i++) {
right_bound[i] = lower_div(d*i, h) + x0;
}
}
h = extrema_y[points[0]] - extrema_y[points[1]];
d = extrema_x[points[0]] - extrema_x[points[1]];
x0 = extrema_x[points[1]];
for (j = 1; j <= h; j++) {
right_bound[i] = lower_div(d*j, h) + x0;
i++;
}
}
// left bound
if (!clockwise) {
h = max_height;
d = extrema_x[points[0]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
if (h == 0) { printf("tiangles: %d) h = 0\n", index); asm("trap;"); }
for (i = 0; i <= h; i++) {
left_bound[i] = upper_div(d*i, h) + x0;
}
}
else {
h = extrema_y[points[1]] - extrema_y[points[2]];
d = extrema_x[points[1]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
i = 0;
if (h == 0) {
left_bound[i] = extrema_x[points[1]];
i++;
}
else {
for (; i <= h; i++) {
left_bound[i] = upper_div(d*i, h) + x0;
}
}
h = extrema_y[points[0]] - extrema_y[points[1]];
d = extrema_x[points[0]] - extrema_x[points[1]];
x0 = extrema_x[points[1]];
for (j = 1; j <= h; j++) {
left_bound[i] = upper_div(d*j, h) + x0;
i++;
}
}
cramer(extrema_x, extrema_y, extrema_value, points, a);
j = extrema_y[points[2]];
for (h = 0; h <= max_height; h++) {
for (i = left_bound[h]; i <= right_bound[h]; i++) { map[j][i] = a[0] * i + a[1] * j + a[2]; }
j++;
}
free(left_bound);
free(right_bound);
}
}
__device__ void sort_points(const int* triangles, const int index, const int* extrema_y, int* points) {
int p1 = triangles[index * 3 + 0], p2 = triangles[index * 3 + 1], p3 = triangles[index * 3 + 2];
if (extrema_y[p1] < extrema_y[p2]) {
points[0] = p2;
points[1] = p1;
}
else {
points[0] = p1;
points[1] = p2;
}
if (extrema_y[p3] <= extrema_y[points[1]]) {
points[2] = p3;
}
else {
points[2] = points[1];
points[1] = p3;
if (extrema_y[points[1]] > extrema_y[points[0]]) {
int temp = points[0];
points[0] = points[1];
points[1] = temp;
}
}
}
__device__ void find_direction(const int* extrema_x, const int* extrema_y, const int* points, bool* clockwise) {
float vec_A[2], vec_B[2];
float z;
vec_A[0] = extrema_x[points[2]] - extrema_x[points[0]];
vec_A[1] = extrema_y[points[2]] - extrema_y[points[0]];
vec_B[0] = extrema_x[points[1]] - extrema_x[points[0]];
vec_B[1] = extrema_y[points[1]] - extrema_y[points[0]];
z = vec_A[0] * vec_B[1] - vec_A[1] * vec_B[0];
if (z < 0) { *clockwise = true; }
else { *clockwise = false; }
}
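/*
 * Added note: find_direction classifies the winding of the y-sorted triangle by the
 * z-component of the 2D cross product (P2-P0) x (P1-P0); a negative z means the
 * vertices run clockwise, which draw_triangles uses to decide whether the long edge
 * P0-P2 forms the right or the left bound of the scanline fill.
 */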
__device__ void cramer(const int* extrema_x, const int* extrema_y, const float* extrema_value, const int* points, float* a) {
float delta, delta_x, delta_y, delta_z;
delta = extrema_x[points[0]] * extrema_y[points[1]]
+ extrema_x[points[1]] * extrema_y[points[2]]
+ extrema_x[points[2]] * extrema_y[points[0]];
delta -= (extrema_x[points[0]] * extrema_y[points[2]]
+ extrema_x[points[1]] * extrema_y[points[0]]
+ extrema_x[points[2]] * extrema_y[points[1]]);
delta_x = extrema_value[points[0]] * extrema_y[points[1]]
+ extrema_value[points[1]] * extrema_y[points[2]]
+ extrema_value[points[2]] * extrema_y[points[0]];
delta_x -= (extrema_value[points[0]] * extrema_y[points[2]]
+ extrema_value[points[1]] * extrema_y[points[0]]
+ extrema_value[points[2]] * extrema_y[points[1]]);
delta_y = extrema_x[points[0]] * extrema_value[points[1]]
+ extrema_x[points[1]] * extrema_value[points[2]]
+ extrema_x[points[2]] * extrema_value[points[0]];
delta_y -= (extrema_x[points[0]] * extrema_value[points[2]]
+ extrema_x[points[1]] * extrema_value[points[0]]
+ extrema_x[points[2]] * extrema_value[points[1]]);
delta_z = extrema_x[points[0]] * extrema_y[points[1]] * extrema_value[points[2]]
+ extrema_x[points[1]] * extrema_y[points[2]] * extrema_value[points[0]]
+ extrema_x[points[2]] * extrema_y[points[0]] * extrema_value[points[1]];
delta_z -= (extrema_x[points[0]] * extrema_y[points[2]] * extrema_value[points[1]]
+ extrema_x[points[1]] * extrema_y[points[0]] * extrema_value[points[2]]
+ extrema_x[points[2]] * extrema_y[points[1]] * extrema_value[points[0]]);
a[0] = delta_x / delta;
a[1] = delta_y / delta;
a[2] = delta_z / delta;
}
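/*
 * Added note: cramer() solves the 3x3 linear system
 *   a0*x_i + a1*y_i + a2 = z_i   (i = 0,1,2, the triangle's vertices)
 * by Cramer's rule:
 *   delta   = det | x0 y0 1 |     delta_x = det | z0 y0 1 |  (x-column replaced by z)
 *                 | x1 y1 1 |                   | z1 y1 1 |
 *                 | x2 y2 1 |                   | z2 y2 1 |
 * and similarly for delta_y (y-column) and delta_z (constant column), so that
 * a = (delta_x/delta, delta_y/delta, delta_z/delta) is the interpolating plane that
 * draw_triangles evaluates as map[j][i] = a0*i + a1*j + a2.
 */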
////////////////////////////////////////////////
// DELAUNAY RELATED //
////////////////////////////////////////////////
__device__ void write_triangle(int* triangles, const int index, const int p0, const int p1, const int p2) {
triangles[index * 3] = p0;
triangles[index * 3 + 1] = p1;
triangles[index * 3 + 2] = p2;
}
__device__ void write_triangle(int* dst, const int* src) {
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
}
__device__ int sign(const int p, const int t0, const int t1,const int* x, const int* y) {
return (x[t0] - x[p]) * (y[t1] - y[p]) - (x[t1] - x[p]) * (y[t0] - y[p]);
}
__device__ int distance(const int A, const int B, const int* x, const int* y) {
int dx = x[A] - x[B], dy = y[A] - y[B];
return dx * dx + dy * dy;
}
__device__ point_status in_triangle(const int p, const int* t, const int* x, const int* y) {
const int A = t[0];
const int B = t[1];
const int C = t[2];
int i;
int s[3];
s[0] = sign(p, A, B, x, y);
s[1] = sign(p, B, C, x, y);
s[2] = sign(p, C, A, x, y);
for (i = 0; i < 3; i++) {
if (s[i] == 0) {
int L = distance(t[i], t[(i + 1) % 3], x, y);
if (distance(p, t[i], x, y) < L && distance(p, t[(i + 1) % 3], x, y) < L) {
return ON_EDGE;
}
}
}
// if sign(PAxPB)==sign(PBxPC)==sign(PCxPA) then INSIDE
if ((s[0] * s[1]) > 0 && (s[1] * s[2]) > 0) { return INSIDE; }
else { return OUTSIDE; }
}
__device__ int on_which_edge(const int p, const int* t, const int* extrema_x, const int* extrema_y) {
const int A = t[0];
const int B = t[1];
const int C = t[2];
int s[3];
int i;
s[0] = sign(p, A, B, extrema_x, extrema_y);
s[1] = sign(p, B, C, extrema_x, extrema_y);
s[2] = sign(p, C, A, extrema_x, extrema_y);
for (i = 0; i < 3; i++) {
if (s[i] == 0) { return i; }
}
printf("point: %d not found on edge\n", p);
printf("%d, %d, %d\n", A, B, C);
return -1;
}
__device__ void find_triangle(const int point, const int* hist_graph, const int index, const int* extrema_x, const int* extrema_y, int* find_info) {
int this_address, child_count, child_index, i;
this_address = index * HIST_COLUMN;
child_count = hist_graph[this_address + HIST_CHILD];
if (child_count == 0) {
find_info[FIND_TRI] = hist_graph[this_address + HIST_HIST_0];
find_info[FIND_STAUS] = in_triangle(point, &hist_graph[this_address], extrema_x, extrema_y);
}
else {
for (i = 0; i < child_count; i++) {
child_index = hist_graph[this_address + HIST_HIST_0 + i];
if (in_triangle(point, &hist_graph[child_index * HIST_COLUMN], extrema_x, extrema_y) != OUTSIDE) {
find_triangle(point, hist_graph, child_index, extrema_x, extrema_y, find_info);
i = 999;
}
}
if (i != 1000) { printf("point: %d, not found in history: %d\n", point, this_address / HIST_COLUMN); }
}
}
__device__ void set_circum_det(const int* extrema_x, const int* extrema_y, const int A, const int D, long long int* AD) {
int Ax = extrema_x[A], Ay = extrema_y[A], Dx = extrema_x[D], Dy = extrema_y[D];
AD[0] = Ax - Dx;
AD[1] = Ay - Dy;
AD[2] = (Ax * Ax - Dx * Dx) + (Ay * Ay - Dy * Dy);
}
__device__ bool in_circum_circle(const int* extrema_x, const int* extrema_y, const int A, const int B, const int C, const int D) {
long long int AD[3], CD[3], BD[3]; //transfer into counter-clockwise
long long int det_value;
set_circum_det(extrema_x, extrema_y, A, D, AD);
set_circum_det(extrema_x, extrema_y, C, D, CD);
set_circum_det(extrema_x, extrema_y, B, D, BD);
det_value = AD[0] * CD[1] * BD[2]
+ AD[2] * CD[0] * BD[1]
+ AD[1] * CD[2] * BD[0]
- AD[2] * CD[1] * BD[0]
- AD[0] * CD[2] * BD[1]
- AD[1] * CD[0] * BD[2];
return det_value < 0;
}
__device__ int find_neighbor(const int* neighbors, const int index, const int target) {
for (int i = 0; i < 3; i++) {
if (neighbors[index * 3 + i] == target) { return i; }
}
printf("index: %d, neighbor: %d not found\n", index, target);
return -1;
}
__device__ void change_neighbor(int* neighbors, const int index, const int old_t, const int new_t) {
if(index != -1){ neighbors[index * 3 + find_neighbor(neighbors, index, old_t)] = new_t; }
}
__device__ void write_hist_child(int* hist_graph, int* hist_index, const int* triangles, const int index, const int t0) {
int address = index * HIST_COLUMN;
write_triangle(&hist_graph[address], &triangles[t0 * 3]);
hist_graph[address + HIST_CHILD] = 0;
hist_graph[address + HIST_HIST_0] = t0;
hist_index[t0] = index;
}
__device__ void exchange_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, int* count_list) {
int add_0, add_1;
add_0 = hist_index[t0] * HIST_COLUMN;
add_1 = hist_index[t1] * HIST_COLUMN;
hist_graph[add_0 + HIST_CHILD] = hist_graph[add_1 + HIST_CHILD] = 2;
hist_graph[add_0 + HIST_HIST_0] = hist_graph[add_1 + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[add_0 + HIST_HIST_1] = hist_graph[add_1 + HIST_HIST_1] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[add_0 + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[add_0 + HIST_HIST_1], t1);
}
__device__ void write_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, int* count_list) {
int address = hist_index[t0] * HIST_COLUMN;
hist_graph[address + HIST_CHILD] = 2;
hist_graph[address + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_1] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_1], t1);
}
__device__ void write_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, const int t2, int* count_list) {
int address = hist_index[t0] * HIST_COLUMN;
hist_graph[address + HIST_CHILD] = 3;
hist_graph[address + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_1] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_2] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_1], t1);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_2], t2);
}
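// Recursive edge flip: if the vertex of tri_1 opposite the shared edge falls inside tri_0's
// circumcircle, swap the diagonal, rewire the neighbor links, record both rewritten triangles
// as new leaves in the history DAG, then re-test the two edges that were just exposed.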
__device__ void flip(const int tri_0, const int tri_1, int* triangles, int* neighbors, int* hist_graph, int* hist_index, int* count_list,
const int* extrema_x, const int* extrema_y) {
if (tri_0 != -1 && tri_1 != -1) {
int tri0_nb, tri1_nb;
int t0[3], t1[3];
bool in_circle;
write_triangle(t0, &triangles[tri_0 * 3]);
write_triangle(t1, &triangles[tri_1 * 3]);
tri0_nb = find_neighbor(neighbors, tri_0, tri_1);
tri1_nb = find_neighbor(neighbors, tri_1, tri_0);
in_circle = in_circum_circle(extrema_x, extrema_y, t0[(tri0_nb + 2) % 3], t0[tri0_nb], t0[(tri0_nb + 1) % 3], t1[(tri1_nb + 2) % 3]);
if (in_circle) {
//printf("flip tri: %d, %d\n", tri_0, tri_1);
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
//printf("tri %d) %d, %d, %d\n", tri_1, triangles[tri_1 * 3], triangles[tri_1 * 3 + 1], triangles[tri_1 * 3 + 2]);
t0[(tri0_nb + 1) % 3] = t1[(tri1_nb + 2) % 3];
t1[(tri1_nb + 1) % 3] = t0[(tri0_nb + 2) % 3];
neighbors[tri_0 * 3 + tri0_nb] = neighbors[tri_1 * 3 + (tri1_nb + 1) % 3];
neighbors[tri_1 * 3 + tri1_nb] = neighbors[tri_0 * 3 + (tri0_nb + 1) % 3];
neighbors[tri_0 * 3 + (tri0_nb + 1) % 3] = tri_1;
neighbors[tri_1 * 3 + (tri1_nb + 1) % 3] = tri_0;
change_neighbor(neighbors, neighbors[tri_0 * 3 + tri0_nb], tri_1, tri_0);
change_neighbor(neighbors, neighbors[tri_1 * 3 + tri1_nb], tri_0, tri_1);
write_triangle(&triangles[tri_0 * 3],t0);
write_triangle(&triangles[tri_1 * 3],t1);
exchange_hist(hist_graph, hist_index, triangles, tri_0, tri_1, count_list);
//printf("After flip\n");
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
//printf("tri %d) %d, %d, %d\n", tri_1, triangles[tri_1 * 3], triangles[tri_1 * 3 + 1], triangles[tri_1 * 3 + 2]);
flip(tri_0, neighbors[tri_0 * 3 + tri0_nb], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(tri_1, neighbors[tri_1 * 3 + tri1_nb], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
}
}
__device__ int insert_on_edge(const int point, const int tri_0, const int tri_1, const int tri_0_nb,const int mark, int* triangles, int* neighbors,
int* hist_graph, int* hist_index, int* count_list, const int* extrema_x, const int* extrema_y) {
int new_0, new_1;
int A, B, C, N_B;
A = triangles[tri_0 * 3 + tri_0_nb];
B = triangles[tri_0 * 3 + (tri_0_nb + 1) % 3];
C = triangles[tri_0 * 3 + (tri_0_nb + 2) % 3];
N_B = neighbors[tri_0 * 3 + (tri_0_nb + 1) % 3];
new_0 = count_list[COUNT_TRI]++;
if (tri_1 != -1) { new_1 = new_0 + mark; }
else { new_1 = tri_1; }
write_triangle(triangles, new_0, point, B, C);
write_triangle(neighbors, new_0, tri_1, N_B, tri_0);
change_neighbor(neighbors, N_B, tri_0, new_0);
triangles[tri_0 * 3 + (tri_0_nb + 1) % 3] = point;
neighbors[tri_0 * 3 + tri_0_nb] = new_1;
neighbors[tri_0 * 3 + (tri_0_nb + 1) % 3] = new_0;
write_hist(hist_graph, hist_index, triangles, tri_0, new_0, count_list);
return new_0;
}
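// Insert one point into the triangulation: an INSIDE hit splits the containing triangle into
// three, an ON_EDGE hit splits each of the two triangles sharing that edge via insert_on_edge;
// every affected outer edge is then flipped to restore the Delaunay condition.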
__device__ void insert_point(const int point, const int* find_info, int* triangles, int* neighbors, int* hist_graph, int* hist_index, int* count_list,
const int* extrema_x, const int* extrema_y) {
int id;
int tri_0 = find_info[FIND_TRI];
int A, B, C, N_A, N_B, N_C, new_1, new_2;
if (find_info[FIND_STAUS] == INSIDE) {
A = triangles[tri_0 * 3];
B = triangles[tri_0 * 3 + 1];
C = triangles[tri_0 * 3 + 2];
N_A = neighbors[tri_0 * 3];
N_B = neighbors[tri_0 * 3 + 1];
N_C = neighbors[tri_0 * 3 + 2];
new_1 = count_list[COUNT_TRI]++;
new_2 = count_list[COUNT_TRI]++;
//printf("point: %d, insert in tri: %d\n", point, tri_0);
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
write_triangle(triangles, tri_0, A, B, point);
write_triangle(triangles, new_1, B, C, point);
write_triangle(triangles, new_2, C, A, point);
write_triangle(neighbors, tri_0, N_A, new_1, new_2);
write_triangle(neighbors, new_1, N_B, new_2, tri_0);
write_triangle(neighbors, new_2, N_C, tri_0, new_1);
//change_neighbor(neighbors, N_A, tri_0, tri_0);
change_neighbor(neighbors, N_B, tri_0, new_1);
change_neighbor(neighbors, N_C, tri_0, new_2);
/*
printf("After insert\n");
printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
printf("tri %d) %d, %d, %d\n", new_1, triangles[new_1 * 3], triangles[new_1 * 3 + 1], triangles[new_1 * 3 + 2]);
printf("tri %d) %d, %d, %d\n\n", new_2, triangles[new_2 * 3], triangles[new_2 * 3 + 1], triangles[new_2 * 3 + 2]);
*/
write_hist(hist_graph, hist_index, triangles, tri_0, new_1, new_2, count_list);
flip(tri_0, N_A, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_1, N_B, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_2, N_C, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
else {
int tri_1, new_0, new_1;
int tri_0_nb, tri_1_nb;
tri_0_nb = on_which_edge(point, &triangles[tri_0 * 3], extrema_x, extrema_y);
tri_1 = neighbors[tri_0 * 3 + tri_0_nb];
new_0 = insert_on_edge(point, tri_0, tri_1, tri_0_nb, 1, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
if(tri_1 != -1){
tri_1_nb = on_which_edge(point, &triangles[tri_1 * 3], extrema_x, extrema_y);
new_1 = insert_on_edge(point, tri_1, tri_0, tri_1_nb, -1, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(tri_1, neighbors[tri_1*3+(tri_1_nb +2)%3], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_1, neighbors[new_1 * 3 + 1], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
flip(tri_0, neighbors[tri_0 * 3 + (tri_0_nb + 2) % 3], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_0, neighbors[new_0 * 3 + 1], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
}
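// Single-thread setup kernel: build the two starting triangles over points 0-3, create the
// matching history-DAG nodes, reset the triangle/history counters, and clear the output map to 255.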
__global__ void initialize(float** map,
const int* extrema_x, const int* extrema_y, const float* extrema_value,
int* triangles, int* neighbors, int* history_index, int* history_graph, int* count_list,
const int width, const int height){
history_graph[0 * HIST_COLUMN + HIST_CHILD] = 2;
history_graph[0 * HIST_COLUMN + HIST_HIST_0] = 1;
history_graph[0 * HIST_COLUMN + HIST_HIST_1] = 2;
write_triangle(triangles, 0, 0, 1, 2);
write_triangle(neighbors, 0, -1, 1, -1);
history_graph[1 * HIST_COLUMN + HIST_PT_0] = 0;
history_graph[1 * HIST_COLUMN + HIST_PT_1] = 1;
history_graph[1 * HIST_COLUMN + HIST_PT_2] = 2;
history_graph[1 * HIST_COLUMN + HIST_CHILD] = 0;
history_graph[1 * HIST_COLUMN + HIST_HIST_0] = 0;
history_index[0] = 1;
write_triangle(triangles, 1, 1, 3, 2);
write_triangle(neighbors, 1, -1, -1, 0);
history_graph[2 * HIST_COLUMN + HIST_PT_0] = 1;
history_graph[2 * HIST_COLUMN + HIST_PT_1] = 3;
history_graph[2 * HIST_COLUMN + HIST_PT_2] = 2;
history_graph[2 * HIST_COLUMN + HIST_CHILD] = 0;
history_graph[2 * HIST_COLUMN + HIST_HIST_0] = 1;
history_index[1] = 2;
count_list[COUNT_TRI] = 2;
count_list[COUNT_HIST] = 3;
int i,j;
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
map[i][j] = 255;
}
}
}
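// Single-thread incremental construction: locate each remaining point through the history DAG,
// insert it, and trap if the triangle or history buffers would overflow.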
__global__ void incremental_construction(int* points_count,const int mode, const int* extrema_x, const int* extrema_y,
int* triangles, int* neighbors, int* hist_index, int* hist_graph, int* count_list) {
int find_info[FIND_SIZE];
int i, k, pt_count;
pt_count = points_count[mode];
/*
printf("Points:\n");
for (i = 0; i < pt_count; i++) {
printf("%d, %d\n", extrema_x[i], extrema_y[i]);
}
printf("\n");
*/
//printf("%d, %d, %d\n", points_count[mode], count_list[COUNT_TRI], count_list[COUNT_HIST]);
for (i = 4; i <pt_count; i++) {
find_triangle(i, hist_graph, 0, extrema_x, extrema_y, find_info);
//printf("point: %d, in tri: %d, status: %d\n", i, find_info[FIND_TRI], find_info[FIND_STAUS]);
k = find_info[FIND_TRI];
insert_point(i, find_info, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
if (count_list[COUNT_TRI] > TRIANGLE_SIZE) {
printf("COUNT_TRI out of TRIANGLE_SIZE\n");
asm("trap;");
}
if (count_list[COUNT_HIST] > HIST_SIZE) {
printf("COUNT_HIST out of HIST_SIZE\n");
asm("trap;");
}
}
/*
printf("\nTriangles:\n");
for (i = 0; i < count_list[COUNT_TRI]; i++) {
printf("%d) %d, %d, %d\n", i, triangles[i * 3], triangles[i * 3 + 1], triangles[i * 3 + 2]);
}
printf("\nNeighbors:\n");
for (i = 0; i < count_list[COUNT_TRI]; i++) {
printf("%d) %d, %d, %d\n", i, neighbors[i * 3], neighbors[i * 3 + 1], neighbors[i * 3 + 2]);
}
printf("\nHistory Graph:\n");
for (i = 0; i < count_list[COUNT_HIST]; i++) {
int address = i * HIST_COLUMN;
printf("%d) %d, %d, %d |%d| %d, %d, %d\n", i, hist_graph[address + HIST_PT_0], hist_graph[address + HIST_PT_1], hist_graph[address + HIST_PT_2],
hist_graph[address + HIST_CHILD], hist_graph[address + HIST_HIST_0], hist_graph[address + HIST_HIST_1], hist_graph[address + HIST_HIST_2]);
}
*/
}
void write_csv(const char* file_path, int* extrema_x, int* extrema_y, int* triangles, int* count_list, int* points_count, int mode) {
int *host_x, *host_y, *host_tri, *host_tri_count, *host_pt_count;
host_x = (int*)malloc(EXTREMA_SIZE * sizeof(int));
host_y = (int*)malloc(EXTREMA_SIZE * sizeof(int));
host_tri = (int*)malloc(TRIANGLE_SIZE * 3 * sizeof(int));
host_tri_count = (int*)malloc(COUNT_SIZE * sizeof(int));
host_pt_count = (int*)malloc(2 * sizeof(int));
hipMemcpy(host_x, extrema_x, EXTREMA_SIZE * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(host_y, extrema_y, EXTREMA_SIZE * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(host_tri, triangles, TRIANGLE_SIZE * 3 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(host_tri_count, count_list, COUNT_SIZE * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(host_pt_count, points_count, 2 * sizeof(int), hipMemcpyDeviceToHost);
FILE* fp;
int i;
fp = fopen(file_path, "w");
fprintf(fp, "%d\n", host_pt_count[mode]);
for (i = 0; i < host_pt_count[mode]; i++) {
fprintf(fp, "%d, %d\n", host_x[i], host_y[i]);
}
fprintf(fp, "%d\n", host_tri_count[COUNT_TRI]);
for (i = 0; i < host_tri_count[COUNT_TRI]; i++) {
fprintf(fp, "%d, %d, %d\n", host_tri[i * 3], host_tri[i * 3 + 1], host_tri[i * 3 + 2]);
}
fclose(fp);
free(host_x);
free(host_y);
free(host_tri);
free(host_tri_count);
free(host_pt_count);
}
void Delaunay_triangle(float** map, int* points_count, int mode,
int* extrema_x, int* extrema_y, float* extrema_value,
const int width, const int height){
int *triangles, *neighbors, *hist_index ,*hist_graph, *count_list;
hipMalloc(&triangles, TRIANGLE_SIZE * 3 * sizeof(int));
hipMalloc(&neighbors, TRIANGLE_SIZE * 3 * sizeof(int));
hipMalloc(&hist_index, TRIANGLE_SIZE * sizeof(int));
hipMalloc(&hist_graph, HIST_SIZE * HIST_COLUMN * sizeof(int));
hipMalloc(&count_list, COUNT_SIZE * sizeof(int));
hipLaunchKernelGGL(( initialize), dim3(1),dim3(1), 0, 0, map, extrema_x, extrema_y, extrema_value, triangles, neighbors, hist_index, hist_graph, count_list, width, height);
hipLaunchKernelGGL(( incremental_construction), dim3(1),dim3(1), 0, 0, points_count, mode, extrema_x, extrema_y, triangles, neighbors, hist_index, hist_graph, count_list);
hipFree(neighbors);
hipFree(hist_index);
hipFree(hist_graph);
hipLaunchKernelGGL(( draw_triangles) , dim3(1),dim3(1), 0, 0, map, extrema_x, extrema_y, extrema_value, triangles, count_list);
check("error in drawing triangles");
//write_csv("figures/result.csv", extrema_x, extrema_y, triangles,count_list, points_count, mode);
hipFree(count_list);
hipFree(triangles);
} | 82e5bda89d5c369fad27c1d0077fb2739f5d736d.cu | #include "Delaunay.h"
////////////////////////////////////////////////
// DRAW TRIANGLE RELATED //
////////////////////////////////////////////////
__device__ int lower_div(int a, int b) {
if (a < 0) { return (a + 1) / b - 1; }
else { return a / b; }
}
__device__ int upper_div(int a, int b) {
if (a > 0) { return (a - 1) / b + 1; }
else { return a / b; }
}
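// Scanline rasterizer: for each triangle, sort the vertices by y, derive integer left/right
// bounds for every row from the edge slopes, fit the plane a0*x + a1*y + a2 through the three
// vertex values with Cramer's rule, and fill those pixels of the output map.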
__global__ void draw_triangles(float** map, int* extrema_x, int* extrema_y, float* extrema_value, int* triangles, const int* count_list) {
int points[3];
float a[3]; // a0*x + a1*y + a2 = z
int max_height;
bool clockwise;
int *left_bound, *right_bound;
int index;
for (index = 0; index < count_list[COUNT_TRI]; index++) {
sort_points(triangles, index, extrema_y, points);
find_direction(extrema_x, extrema_y, points, &clockwise);
max_height = extrema_y[points[0]] - extrema_y[points[2]];
left_bound = (int*)malloc((max_height + 1) * sizeof(int));
right_bound = (int*)malloc((max_height + 1) * sizeof(int));
int i, j, h, d, x0;
// right bound
if (clockwise) {
h = max_height;
d = extrema_x[points[0]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
if (h == 0) { printf("tiangles: %d) h = 0\n", index); asm("trap;"); }
for (i = 0; i <= h; i++) {
right_bound[i] = lower_div(d*i, h) + x0;
}
}
else {
h = extrema_y[points[1]] - extrema_y[points[2]];
d = extrema_x[points[1]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
i = 0;
if (h == 0) {
right_bound[i] = extrema_x[points[1]];
i++;
}
else {
for (i; i <= h; i++) {
right_bound[i] = lower_div(d*i, h) + x0;
}
}
h = extrema_y[points[0]] - extrema_y[points[1]];
d = extrema_x[points[0]] - extrema_x[points[1]];
x0 = extrema_x[points[1]];
for (j = 1; j <= h; j++) {
right_bound[i] = lower_div(d*j, h) + x0;
i++;
}
}
// left bound
if (!clockwise) {
h = max_height;
d = extrema_x[points[0]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
if (h == 0) { printf("tiangles: %d) h = 0\n", index); asm("trap;"); }
for (i = 0; i <= h; i++) {
left_bound[i] = upper_div(d*i, h) + x0;
}
}
else {
h = extrema_y[points[1]] - extrema_y[points[2]];
d = extrema_x[points[1]] - extrema_x[points[2]];
x0 = extrema_x[points[2]];
i = 0;
if (h == 0) {
left_bound[i] = extrema_x[points[1]];
i++;
}
else {
for (i; i <= h; i++) {
left_bound[i] = upper_div(d*i, h) + x0;
}
}
h = extrema_y[points[0]] - extrema_y[points[1]];
d = extrema_x[points[0]] - extrema_x[points[1]];
x0 = extrema_x[points[1]];
for (j = 1; j <= h; j++) {
left_bound[i] = upper_div(d*j, h) + x0;
i++;
}
}
cramer(extrema_x, extrema_y, extrema_value, points, a);
j = extrema_y[points[2]];
for (h = 0; h <= max_height; h++) {
for (i = left_bound[h]; i <= right_bound[h]; i++) { map[j][i] = a[0] * i + a[1] * j + a[2]; }
j++;
}
free(left_bound);
free(right_bound);
}
}
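// Sort the triangle's three vertices by y: points[0] receives the vertex with the largest y,
// points[2] the one with the smallest.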
__device__ void sort_points(const int* triangles, const int index, const int* extrema_y, int* points) {
int p1 = triangles[index * 3 + 0], p2 = triangles[index * 3 + 1], p3 = triangles[index * 3 + 2];
if (extrema_y[p1] < extrema_y[p2]) {
points[0] = p2;
points[1] = p1;
}
else {
points[0] = p1;
points[1] = p2;
}
if (extrema_y[p3] <= extrema_y[points[1]]) {
points[2] = p3;
}
else {
points[2] = points[1];
points[1] = p3;
if (extrema_y[points[1]] > extrema_y[points[0]]) {
int temp = points[0];
points[0] = points[1];
points[1] = temp;
}
}
}
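// Orientation test on the y-sorted vertices: the z component of (P2-P0) x (P1-P0) is computed,
// and a negative value is treated as clockwise, which decides which edge chain supplies the
// right-hand bound during rasterization.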
__device__ void find_direction(const int* extrema_x, const int* extrema_y, const int* points, bool* clockwise) {
float vec_A[2], vec_B[2];
float z;
vec_A[0] = extrema_x[points[2]] - extrema_x[points[0]];
vec_A[1] = extrema_y[points[2]] - extrema_y[points[0]];
vec_B[0] = extrema_x[points[1]] - extrema_x[points[0]];
vec_B[1] = extrema_y[points[1]] - extrema_y[points[0]];
z = vec_A[0] * vec_B[1] - vec_A[1] * vec_B[0];
if (z < 0) { *clockwise = true; }
else { *clockwise = false; }
}
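// Cramer's rule solve of the 3x3 system that fits the plane a[0]*x + a[1]*y + a[2] = value
// through the triangle's three vertices.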
__device__ void cramer(const int* extrema_x, const int* extrema_y, const float* extrema_value, const int* points, float* a) {
float delta, delta_x, delta_y, delta_z;
delta = extrema_x[points[0]] * extrema_y[points[1]]
+ extrema_x[points[1]] * extrema_y[points[2]]
+ extrema_x[points[2]] * extrema_y[points[0]];
delta -= (extrema_x[points[0]] * extrema_y[points[2]]
+ extrema_x[points[1]] * extrema_y[points[0]]
+ extrema_x[points[2]] * extrema_y[points[1]]);
delta_x = extrema_value[points[0]] * extrema_y[points[1]]
+ extrema_value[points[1]] * extrema_y[points[2]]
+ extrema_value[points[2]] * extrema_y[points[0]];
delta_x -= (extrema_value[points[0]] * extrema_y[points[2]]
+ extrema_value[points[1]] * extrema_y[points[0]]
+ extrema_value[points[2]] * extrema_y[points[1]]);
delta_y = extrema_x[points[0]] * extrema_value[points[1]]
+ extrema_x[points[1]] * extrema_value[points[2]]
+ extrema_x[points[2]] * extrema_value[points[0]];
delta_y -= (extrema_x[points[0]] * extrema_value[points[2]]
+ extrema_x[points[1]] * extrema_value[points[0]]
+ extrema_x[points[2]] * extrema_value[points[1]]);
delta_z = extrema_x[points[0]] * extrema_y[points[1]] * extrema_value[points[2]]
+ extrema_x[points[1]] * extrema_y[points[2]] * extrema_value[points[0]]
+ extrema_x[points[2]] * extrema_y[points[0]] * extrema_value[points[1]];
delta_z -= (extrema_x[points[0]] * extrema_y[points[2]] * extrema_value[points[1]]
+ extrema_x[points[1]] * extrema_y[points[0]] * extrema_value[points[2]]
+ extrema_x[points[2]] * extrema_y[points[1]] * extrema_value[points[0]]);
a[0] = delta_x / delta;
a[1] = delta_y / delta;
a[2] = delta_z / delta;
}
////////////////////////////////////////////////
// DELAUNAY RELATED //
////////////////////////////////////////////////
__device__ void write_triangle(int* triangles, const int index, const int p0, const int p1, const int p2) {
triangles[index * 3] = p0;
triangles[index * 3 + 1] = p1;
triangles[index * 3 + 2] = p2;
}
__device__ void write_triangle(int* dst, const int* src) {
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
}
__device__ int sign(const int p, const int t0, const int t1,const int* x, const int* y) {
return (x[t0] - x[p]) * (y[t1] - y[p]) - (x[t1] - x[p]) * (y[t0] - y[p]);
}
__device__ int distance(const int A, const int B, const int* x, const int* y) {
int dx = x[A] - x[B], dy = y[A] - y[B];
return dx * dx + dy * dy;
}
__device__ point_status in_triangle(const int p, const int* t, const int* x, const int* y) {
const int A = t[0];
const int B = t[1];
const int C = t[2];
int i;
int s[3];
s[0] = sign(p, A, B, x, y);
s[1] = sign(p, B, C, x, y);
s[2] = sign(p, C, A, x, y);
for (i = 0; i < 3; i++) {
if (s[i] == 0) {
int L = distance(t[i], t[(i + 1) % 3], x, y);
if (distance(p, t[i], x, y) < L && distance(p, t[(i + 1) % 3], x, y) < L) {
return ON_EDGE;
}
}
}
// if sign(PAxPB)==sign(PBxPC)==sign(PCxPA) then INSIDE
if ((s[0] * s[1]) > 0 && (s[1] * s[2]) > 0) { return INSIDE; }
else { return OUTSIDE; }
}
__device__ int on_which_edge(const int p, const int* t, const int* extrema_x, const int* extrema_y) {
const int A = t[0];
const int B = t[1];
const int C = t[2];
int s[3];
int i;
s[0] = sign(p, A, B, extrema_x, extrema_y);
s[1] = sign(p, B, C, extrema_x, extrema_y);
s[2] = sign(p, C, A, extrema_x, extrema_y);
for (i = 0; i < 3; i++) {
if (s[i] == 0) { return i; }
}
printf("point: %d not found on edge\n", p);
printf("%d, %d, %d\n", A, B, C);
return -1;
}
__device__ void find_triangle(const int point, const int* hist_graph, const int index, const int* extrema_x, const int* extrema_y, int* find_info) {
int this_address, child_count, child_index, i;
this_address = index * HIST_COLUMN;
child_count = hist_graph[this_address + HIST_CHILD];
if (child_count == 0) {
find_info[FIND_TRI] = hist_graph[this_address + HIST_HIST_0];
find_info[FIND_STAUS] = in_triangle(point, &hist_graph[this_address], extrema_x, extrema_y);
}
else {
for (i = 0; i < child_count; i++) {
child_index = hist_graph[this_address + HIST_HIST_0 + i];
if (in_triangle(point, &hist_graph[child_index * HIST_COLUMN], extrema_x, extrema_y) != OUTSIDE) {
find_triangle(point, hist_graph, child_index, extrema_x, extrema_y, find_info);
i = 999;
}
}
if (i != 1000) { printf("point: %d, not found in history: %d\n", point, this_address / HIST_COLUMN); }
}
}
__device__ void set_circum_det(const int* extrema_x, const int* extrema_y, const int A, const int D, long long int* AD) {
int Ax = extrema_x[A], Ay = extrema_y[A], Dx = extrema_x[D], Dy = extrema_y[D];
AD[0] = Ax - Dx;
AD[1] = Ay - Dy;
AD[2] = (Ax * Ax - Dx * Dx) + (Ay * Ay - Dy * Dy);
}
__device__ bool in_circum_circle(const int* extrema_x, const int* extrema_y, const int A, const int B, const int C, const int D) {
long long int AD[3], CD[3], BD[3]; //transfer into counter-clockwise
long long int det_value;
set_circum_det(extrema_x, extrema_y, A, D, AD);
set_circum_det(extrema_x, extrema_y, C, D, CD);
set_circum_det(extrema_x, extrema_y, B, D, BD);
det_value = AD[0] * CD[1] * BD[2]
+ AD[2] * CD[0] * BD[1]
+ AD[1] * CD[2] * BD[0]
- AD[2] * CD[1] * BD[0]
- AD[0] * CD[2] * BD[1]
- AD[1] * CD[0] * BD[2];
return det_value < 0;
}
__device__ int find_neighbor(const int* neighbors, const int index, const int target) {
for (int i = 0; i < 3; i++) {
if (neighbors[index * 3 + i] == target) { return i; }
}
printf("index: %d, neighbor: %d not found\n", index, target);
return -1;
}
__device__ void change_neighbor(int* neighbors, const int index, const int old_t, const int new_t) {
if(index != -1){ neighbors[index * 3 + find_neighbor(neighbors, index, old_t)] = new_t; }
}
__device__ void write_hist_child(int* hist_graph, int* hist_index, const int* triangles, const int index, const int t0) {
int address = index * HIST_COLUMN;
write_triangle(&hist_graph[address], &triangles[t0 * 3]);
hist_graph[address + HIST_CHILD] = 0;
hist_graph[address + HIST_HIST_0] = t0;
hist_index[t0] = index;
}
__device__ void exchange_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, int* count_list) {
int add_0, add_1;
add_0 = hist_index[t0] * HIST_COLUMN;
add_1 = hist_index[t1] * HIST_COLUMN;
hist_graph[add_0 + HIST_CHILD] = hist_graph[add_1 + HIST_CHILD] = 2;
hist_graph[add_0 + HIST_HIST_0] = hist_graph[add_1 + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[add_0 + HIST_HIST_1] = hist_graph[add_1 + HIST_HIST_1] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[add_0 + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[add_0 + HIST_HIST_1], t1);
}
__device__ void write_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, int* count_list) {
int address = hist_index[t0] * HIST_COLUMN;
hist_graph[address + HIST_CHILD] = 2;
hist_graph[address + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_1] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_1], t1);
}
__device__ void write_hist(int* hist_graph, int* hist_index, const int* triangles, const int t0, const int t1, const int t2, int* count_list) {
int address = hist_index[t0] * HIST_COLUMN;
hist_graph[address + HIST_CHILD] = 3;
hist_graph[address + HIST_HIST_0] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_1] = count_list[COUNT_HIST]++;
hist_graph[address + HIST_HIST_2] = count_list[COUNT_HIST]++;
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_0], t0);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_1], t1);
write_hist_child(hist_graph, hist_index, triangles, hist_graph[address + HIST_HIST_2], t2);
}
__device__ void flip(const int tri_0, const int tri_1, int* triangles, int* neighbors, int* hist_graph, int* hist_index, int* count_list,
const int* extrema_x, const int* extrema_y) {
if (tri_0 != -1 && tri_1 != -1) {
int tri0_nb, tri1_nb;
int t0[3], t1[3];
bool in_circle;
write_triangle(t0, &triangles[tri_0 * 3]);
write_triangle(t1, &triangles[tri_1 * 3]);
tri0_nb = find_neighbor(neighbors, tri_0, tri_1);
tri1_nb = find_neighbor(neighbors, tri_1, tri_0);
in_circle = in_circum_circle(extrema_x, extrema_y, t0[(tri0_nb + 2) % 3], t0[tri0_nb], t0[(tri0_nb + 1) % 3], t1[(tri1_nb + 2) % 3]);
if (in_circle) {
//printf("flip tri: %d, %d\n", tri_0, tri_1);
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
//printf("tri %d) %d, %d, %d\n", tri_1, triangles[tri_1 * 3], triangles[tri_1 * 3 + 1], triangles[tri_1 * 3 + 2]);
t0[(tri0_nb + 1) % 3] = t1[(tri1_nb + 2) % 3];
t1[(tri1_nb + 1) % 3] = t0[(tri0_nb + 2) % 3];
neighbors[tri_0 * 3 + tri0_nb] = neighbors[tri_1 * 3 + (tri1_nb + 1) % 3];
neighbors[tri_1 * 3 + tri1_nb] = neighbors[tri_0 * 3 + (tri0_nb + 1) % 3];
neighbors[tri_0 * 3 + (tri0_nb + 1) % 3] = tri_1;
neighbors[tri_1 * 3 + (tri1_nb + 1) % 3] = tri_0;
change_neighbor(neighbors, neighbors[tri_0 * 3 + tri0_nb], tri_1, tri_0);
change_neighbor(neighbors, neighbors[tri_1 * 3 + tri1_nb], tri_0, tri_1);
write_triangle(&triangles[tri_0 * 3],t0);
write_triangle(&triangles[tri_1 * 3],t1);
exchange_hist(hist_graph, hist_index, triangles, tri_0, tri_1, count_list);
//printf("After flip\n");
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
//printf("tri %d) %d, %d, %d\n", tri_1, triangles[tri_1 * 3], triangles[tri_1 * 3 + 1], triangles[tri_1 * 3 + 2]);
flip(tri_0, neighbors[tri_0 * 3 + tri0_nb], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(tri_1, neighbors[tri_1 * 3 + tri1_nb], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
}
}
__device__ int insert_on_edge(const int point, const int tri_0, const int tri_1, const int tri_0_nb,const int mark, int* triangles, int* neighbors,
int* hist_graph, int* hist_index, int* count_list, const int* extrema_x, const int* extrema_y) {
int new_0, new_1;
int A, B, C, N_B;
A = triangles[tri_0 * 3 + tri_0_nb];
B = triangles[tri_0 * 3 + (tri_0_nb + 1) % 3];
C = triangles[tri_0 * 3 + (tri_0_nb + 2) % 3];
N_B = neighbors[tri_0 * 3 + (tri_0_nb + 1) % 3];
new_0 = count_list[COUNT_TRI]++;
if (tri_1 != -1) { new_1 = new_0 + mark; }
else { new_1 = tri_1; }
write_triangle(triangles, new_0, point, B, C);
write_triangle(neighbors, new_0, tri_1, N_B, tri_0);
change_neighbor(neighbors, N_B, tri_0, new_0);
triangles[tri_0 * 3 + (tri_0_nb + 1) % 3] = point;
neighbors[tri_0 * 3 + tri_0_nb] = new_1;
neighbors[tri_0 * 3 + (tri_0_nb + 1) % 3] = new_0;
write_hist(hist_graph, hist_index, triangles, tri_0, new_0, count_list);
return new_0;
}
__device__ void insert_point(const int point, const int* find_info, int* triangles, int* neighbors, int* hist_graph, int* hist_index, int* count_list,
const int* extrema_x, const int* extrema_y) {
int id;
int tri_0 = find_info[FIND_TRI];
int A, B, C, N_A, N_B, N_C, new_1, new_2;
if (find_info[FIND_STAUS] == INSIDE) {
A = triangles[tri_0 * 3];
B = triangles[tri_0 * 3 + 1];
C = triangles[tri_0 * 3 + 2];
N_A = neighbors[tri_0 * 3];
N_B = neighbors[tri_0 * 3 + 1];
N_C = neighbors[tri_0 * 3 + 2];
new_1 = count_list[COUNT_TRI]++;
new_2 = count_list[COUNT_TRI]++;
//printf("point: %d, insert in tri: %d\n", point, tri_0);
//printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
write_triangle(triangles, tri_0, A, B, point);
write_triangle(triangles, new_1, B, C, point);
write_triangle(triangles, new_2, C, A, point);
write_triangle(neighbors, tri_0, N_A, new_1, new_2);
write_triangle(neighbors, new_1, N_B, new_2, tri_0);
write_triangle(neighbors, new_2, N_C, tri_0, new_1);
//change_neighbor(neighbors, N_A, tri_0, tri_0);
change_neighbor(neighbors, N_B, tri_0, new_1);
change_neighbor(neighbors, N_C, tri_0, new_2);
/*
printf("After insert\n");
printf("tri %d) %d, %d, %d\n", tri_0, triangles[tri_0 * 3], triangles[tri_0 * 3 + 1], triangles[tri_0 * 3 + 2]);
printf("tri %d) %d, %d, %d\n", new_1, triangles[new_1 * 3], triangles[new_1 * 3 + 1], triangles[new_1 * 3 + 2]);
printf("tri %d) %d, %d, %d\n\n", new_2, triangles[new_2 * 3], triangles[new_2 * 3 + 1], triangles[new_2 * 3 + 2]);
*/
write_hist(hist_graph, hist_index, triangles, tri_0, new_1, new_2, count_list);
flip(tri_0, N_A, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_1, N_B, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_2, N_C, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
else {
int tri_1, new_0, new_1;
int tri_0_nb, tri_1_nb;
tri_0_nb = on_which_edge(point, &triangles[tri_0 * 3], extrema_x, extrema_y);
tri_1 = neighbors[tri_0 * 3 + tri_0_nb];
new_0 = insert_on_edge(point, tri_0, tri_1, tri_0_nb, 1, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
if(tri_1 != -1){
tri_1_nb = on_which_edge(point, &triangles[tri_1 * 3], extrema_x, extrema_y);
new_1 = insert_on_edge(point, tri_1, tri_0, tri_1_nb, -1, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(tri_1, neighbors[tri_1*3+(tri_1_nb +2)%3], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_1, neighbors[new_1 * 3 + 1], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
flip(tri_0, neighbors[tri_0 * 3 + (tri_0_nb + 2) % 3], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
flip(new_0, neighbors[new_0 * 3 + 1], triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
}
}
__global__ void initialize(float** map,
const int* extrema_x, const int* extrema_y, const float* extrema_value,
int* triangles, int* neighbors, int* history_index, int* history_graph, int* count_list,
const int width, const int height){
history_graph[0 * HIST_COLUMN + HIST_CHILD] = 2;
history_graph[0 * HIST_COLUMN + HIST_HIST_0] = 1;
history_graph[0 * HIST_COLUMN + HIST_HIST_1] = 2;
write_triangle(triangles, 0, 0, 1, 2);
write_triangle(neighbors, 0, -1, 1, -1);
history_graph[1 * HIST_COLUMN + HIST_PT_0] = 0;
history_graph[1 * HIST_COLUMN + HIST_PT_1] = 1;
history_graph[1 * HIST_COLUMN + HIST_PT_2] = 2;
history_graph[1 * HIST_COLUMN + HIST_CHILD] = 0;
history_graph[1 * HIST_COLUMN + HIST_HIST_0] = 0;
history_index[0] = 1;
write_triangle(triangles, 1, 1, 3, 2);
write_triangle(neighbors, 1, -1, -1, 0);
history_graph[2 * HIST_COLUMN + HIST_PT_0] = 1;
history_graph[2 * HIST_COLUMN + HIST_PT_1] = 3;
history_graph[2 * HIST_COLUMN + HIST_PT_2] = 2;
history_graph[2 * HIST_COLUMN + HIST_CHILD] = 0;
history_graph[2 * HIST_COLUMN + HIST_HIST_0] = 1;
history_index[1] = 2;
count_list[COUNT_TRI] = 2;
count_list[COUNT_HIST] = 3;
int i,j;
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
map[i][j] = 255;
}
}
}
__global__ void incremental_construction(int* points_count,const int mode, const int* extrema_x, const int* extrema_y,
int* triangles, int* neighbors, int* hist_index, int* hist_graph, int* count_list) {
int find_info[FIND_SIZE];
int i, k, pt_count;
pt_count = points_count[mode];
/*
printf("Points:\n");
for (i = 0; i < pt_count; i++) {
printf("%d, %d\n", extrema_x[i], extrema_y[i]);
}
printf("\n");
*/
//printf("%d, %d, %d\n", points_count[mode], count_list[COUNT_TRI], count_list[COUNT_HIST]);
for (i = 4; i <pt_count; i++) {
find_triangle(i, hist_graph, 0, extrema_x, extrema_y, find_info);
//printf("point: %d, in tri: %d, status: %d\n", i, find_info[FIND_TRI], find_info[FIND_STAUS]);
k = find_info[FIND_TRI];
insert_point(i, find_info, triangles, neighbors, hist_graph, hist_index, count_list, extrema_x, extrema_y);
if (count_list[COUNT_TRI] > TRIANGLE_SIZE) {
printf("COUNT_TRI out of TRIANGLE_SIZE\n");
asm("trap;");
}
if (count_list[COUNT_HIST] > HIST_SIZE) {
printf("COUNT_HIST out of HIST_SIZE\n");
asm("trap;");
}
}
/*
printf("\nTriangles:\n");
for (i = 0; i < count_list[COUNT_TRI]; i++) {
printf("%d) %d, %d, %d\n", i, triangles[i * 3], triangles[i * 3 + 1], triangles[i * 3 + 2]);
}
printf("\nNeighbors:\n");
for (i = 0; i < count_list[COUNT_TRI]; i++) {
printf("%d) %d, %d, %d\n", i, neighbors[i * 3], neighbors[i * 3 + 1], neighbors[i * 3 + 2]);
}
printf("\nHistory Graph:\n");
for (i = 0; i < count_list[COUNT_HIST]; i++) {
int address = i * HIST_COLUMN;
printf("%d) %d, %d, %d |%d| %d, %d, %d\n", i, hist_graph[address + HIST_PT_0], hist_graph[address + HIST_PT_1], hist_graph[address + HIST_PT_2],
hist_graph[address + HIST_CHILD], hist_graph[address + HIST_HIST_0], hist_graph[address + HIST_HIST_1], hist_graph[address + HIST_HIST_2]);
}
*/
}
void write_csv(const char* file_path, int* extrema_x, int* extrema_y, int* triangles, int* count_list, int* points_count, int mode) {
int *host_x, *host_y, *host_tri, *host_tri_count, *host_pt_count;
host_x = (int*)malloc(EXTREMA_SIZE * sizeof(int));
host_y = (int*)malloc(EXTREMA_SIZE * sizeof(int));
host_tri = (int*)malloc(TRIANGLE_SIZE * 3 * sizeof(int));
host_tri_count = (int*)malloc(COUNT_SIZE * sizeof(int));
host_pt_count = (int*)malloc(2 * sizeof(int));
cudaMemcpy(host_x, extrema_x, EXTREMA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(host_y, extrema_y, EXTREMA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(host_tri, triangles, TRIANGLE_SIZE * 3 * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(host_tri_count, count_list, COUNT_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(host_pt_count, points_count, 2 * sizeof(int), cudaMemcpyDeviceToHost);
FILE* fp;
int i;
fp = fopen(file_path, "w");
fprintf(fp, "%d\n", host_pt_count[mode]);
for (i = 0; i < host_pt_count[mode]; i++) {
fprintf(fp, "%d, %d\n", host_x[i], host_y[i]);
}
fprintf(fp, "%d\n", host_tri_count[COUNT_TRI]);
for (i = 0; i < host_tri_count[COUNT_TRI]; i++) {
fprintf(fp, "%d, %d, %d\n", host_tri[i * 3], host_tri[i * 3 + 1], host_tri[i * 3 + 2]);
}
fclose(fp);
free(host_x);
free(host_y);
free(host_tri);
free(host_tri_count);
free(host_pt_count);
}
void Delaunay_triangle(float** map, int* points_count, int mode,
int* extrema_x, int* extrema_y, float* extrema_value,
const int width, const int height){
int *triangles, *neighbors, *hist_index ,*hist_graph, *count_list;
cudaMalloc(&triangles, TRIANGLE_SIZE * 3 * sizeof(int));
cudaMalloc(&neighbors, TRIANGLE_SIZE * 3 * sizeof(int));
cudaMalloc(&hist_index, TRIANGLE_SIZE * sizeof(int));
cudaMalloc(&hist_graph, HIST_SIZE * HIST_COLUMN * sizeof(int));
cudaMalloc(&count_list, COUNT_SIZE * sizeof(int));
initialize<<<1,1>>>(map, extrema_x, extrema_y, extrema_value, triangles, neighbors, hist_index, hist_graph, count_list, width, height);
incremental_construction<<<1,1>>>(points_count, mode, extrema_x, extrema_y, triangles, neighbors, hist_index, hist_graph, count_list);
cudaFree(neighbors);
cudaFree(hist_index);
cudaFree(hist_graph);
draw_triangles <<<1,1>>> (map, extrema_x, extrema_y, extrema_value, triangles, count_list);
check("error in drawing triangles");
//write_csv("figures/result.csv", extrema_x, extrema_y, triangles,count_list, points_count, mode);
cudaFree(count_list);
cudaFree(triangles);
} |
9f4670f134f91c76ecb715293dbba0a3a77ad8fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__
void crossEntropyCost(float *desiredOutput, unsigned int length, float *networkOutput, float* result)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < length;
i += blockDim.x * gridDim.x)
{
result[i]=desiredOutput[i]*logf(0.00001f+networkOutput[i])+(1.0f-desiredOutput[i])*logf(1.00001f-networkOutput[i]);
}
}
| 9f4670f134f91c76ecb715293dbba0a3a77ad8fe.cu | extern "C"
__global__
void crossEntropyCost(float *desiredOutput, unsigned int length, float *networkOutput, float* result)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < length;
i += blockDim.x * gridDim.x)
{
result[i]=desiredOutput[i]*logf(0.00001f+networkOutput[i])+(1.0f-desiredOutput[i])*logf(1.00001f-networkOutput[i]);
}
}
|
36cf7873ae988a84c0777e68cf9a5885c70f876f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Returns a numeric column containing lengths of each string in
* based on the provided unary function.
*
* Any null string will result in a null entry for that row in the output column.
*
* @tparam UnaryFunction Device function that returns an integer given a string_view.
* @param strings Strings instance for this operation.
* @param ufn Function returns an integer for each string.
* @param stream Stream to use for any kernels in this function.
* @param mr Resource for allocating device memory.
* @return New INT32 column with lengths for each string.
*/
template <typename UnaryFunction>
std::unique_ptr<column> counts_fn(strings_column_view const& strings,
UnaryFunction& ufn,
rmm::mr::device_memory_resource* mr,
hipStream_t stream = 0)
{
auto strings_count = strings.size();
auto execpol = rmm::exec_policy(stream);
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column
auto results = std::make_unique<cudf::column>(
cudf::data_type{INT32},
strings_count,
rmm::device_buffer(strings_count * sizeof(int32_t), stream, mr),
copy_bitmask(strings.parent(), stream, mr), // copy the null mask
strings.null_count());
auto results_view = results->mutable_view();
auto d_lengths = results_view.data<int32_t>();
// fill in the lengths
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_lengths,
[d_strings, ufn] __device__(size_type idx) {
int32_t length = 0;
if (!d_strings.is_null(idx))
length = static_cast<int32_t>(ufn(d_strings.element<string_view>(idx)));
return length;
});
results->set_null_count(strings.null_count()); // reset null count
return results;
}
} // namespace
std::unique_ptr<column> count_characters(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.length(); };
return counts_fn(strings, ufn, mr, stream);
}
std::unique_ptr<column> count_bytes(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.size_bytes(); };
return counts_fn(strings, ufn, mr, stream);
}
} // namespace detail
namespace {
/**
* @brief Sets the code-point values for each character in the output
* integer memory for each string in the strings column.
*
* For each string, there is a sub-array in d_results with length equal
* to the number of characters in that string. The function here will
* write code-point values to that section as pointed to by the
* corresponding d_offsets value calculated for that string.
*/
struct code_points_fn {
column_device_view d_strings;
size_type* d_offsets; // offset within d_results to fill with each string's code-point values
int32_t* d_results; // base integer array output
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) return;
auto d_str = d_strings.element<string_view>(idx);
auto result = d_results + d_offsets[idx];
thrust::copy(thrust::seq, d_str.begin(), d_str.end(), result);
}
};
} // namespace
namespace detail {
//
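// Flattens the column into one INT32 array of character code points: an inclusive scan of
// per-string character counts gives each string's output offset, then code_points_fn copies
// that string's code-point values into its slot in parallel.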
std::unique_ptr<column> code_points(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create offsets vector to account for each string's character length
rmm::device_vector<size_type> offsets(strings.size() + 1);
size_type* d_offsets = offsets.data().get();
thrust::transform_inclusive_scan(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_offsets + 1,
[d_column] __device__(size_type idx) {
size_type length = 0;
if (!d_column.is_null(idx)) length = d_column.element<string_view>(idx).length();
return length;
},
thrust::plus<size_type>());
CUDA_TRY(hipMemsetAsync(d_offsets, 0, sizeof(size_type), stream));
// the total size is the number of characters in the entire column
size_type num_characters = offsets.back();
// create output column with no nulls
auto results =
make_numeric_column(data_type{INT32}, num_characters, mask_state::UNALLOCATED, stream, mr);
auto results_view = results->mutable_view();
// fill column with character code-point values
auto d_results = results_view.data<int32_t>();
// now set the ranges from each strings' character values
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
code_points_fn{d_column, d_offsets, d_results});
//
results->set_null_count(0);
return results;
}
} // namespace detail
// external APIS
std::unique_ptr<column> count_characters(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_characters(strings, mr);
}
std::unique_ptr<column> count_bytes(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_bytes(strings, mr);
}
std::unique_ptr<column> code_points(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::code_points(strings, mr);
}
} // namespace strings
} // namespace cudf
| 36cf7873ae988a84c0777e68cf9a5885c70f876f.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Returns a numeric column containing lengths of each string in
* based on the provided unary function.
*
* Any null string will result in a null entry for that row in the output column.
*
* @tparam UnaryFunction Device function that returns an integer given a string_view.
* @param strings Strings instance for this operation.
* @param ufn Function returns an integer for each string.
* @param stream Stream to use for any kernels in this function.
* @param mr Resource for allocating device memory.
* @return New INT32 column with lengths for each string.
*/
template <typename UnaryFunction>
std::unique_ptr<column> counts_fn(strings_column_view const& strings,
UnaryFunction& ufn,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
auto execpol = rmm::exec_policy(stream);
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column
auto results = std::make_unique<cudf::column>(
cudf::data_type{INT32},
strings_count,
rmm::device_buffer(strings_count * sizeof(int32_t), stream, mr),
copy_bitmask(strings.parent(), stream, mr), // copy the null mask
strings.null_count());
auto results_view = results->mutable_view();
auto d_lengths = results_view.data<int32_t>();
// fill in the lengths
thrust::transform(execpol->on(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_lengths,
[d_strings, ufn] __device__(size_type idx) {
int32_t length = 0;
if (!d_strings.is_null(idx))
length = static_cast<int32_t>(ufn(d_strings.element<string_view>(idx)));
return length;
});
results->set_null_count(strings.null_count()); // reset null count
return results;
}
} // namespace
std::unique_ptr<column> count_characters(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.length(); };
return counts_fn(strings, ufn, mr, stream);
}
std::unique_ptr<column> count_bytes(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.size_bytes(); };
return counts_fn(strings, ufn, mr, stream);
}
} // namespace detail
namespace {
/**
* @brief Sets the code-point values for each character in the output
* integer memory for each string in the strings column.
*
* For each string, there is a sub-array in d_results with length equal
* to the number of characters in that string. The function here will
* write code-point values to that section as pointed to by the
* corresponding d_offsets value calculated for that string.
*/
struct code_points_fn {
column_device_view d_strings;
size_type* d_offsets; // offset within d_results to fill with each string's code-point values
int32_t* d_results; // base integer array output
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) return;
auto d_str = d_strings.element<string_view>(idx);
auto result = d_results + d_offsets[idx];
thrust::copy(thrust::seq, d_str.begin(), d_str.end(), result);
}
};
} // namespace
namespace detail {
//
std::unique_ptr<column> code_points(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create offsets vector to account for each string's character length
rmm::device_vector<size_type> offsets(strings.size() + 1);
size_type* d_offsets = offsets.data().get();
thrust::transform_inclusive_scan(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_offsets + 1,
[d_column] __device__(size_type idx) {
size_type length = 0;
if (!d_column.is_null(idx)) length = d_column.element<string_view>(idx).length();
return length;
},
thrust::plus<size_type>());
CUDA_TRY(cudaMemsetAsync(d_offsets, 0, sizeof(size_type), stream));
// the total size is the number of characters in the entire column
size_type num_characters = offsets.back();
// create output column with no nulls
auto results =
make_numeric_column(data_type{INT32}, num_characters, mask_state::UNALLOCATED, stream, mr);
auto results_view = results->mutable_view();
// fill column with character code-point values
auto d_results = results_view.data<int32_t>();
// now set the ranges from each strings' character values
thrust::for_each_n(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
code_points_fn{d_column, d_offsets, d_results});
//
results->set_null_count(0);
return results;
}
} // namespace detail
// external APIS
std::unique_ptr<column> count_characters(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_characters(strings, mr);
}
std::unique_ptr<column> count_bytes(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_bytes(strings, mr);
}
std::unique_ptr<column> code_points(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::code_points(strings, mr);
}
} // namespace strings
} // namespace cudf
|
413c322cf8417e4689632c97524b4e9e53a7228e.hip | // !!! This is a file automatically generated by hipify!!!
// incrementArray.cu
// Utilities and system includes
#include <assert.h>
#include <helper_string.h> // helper for shared functions common to CUDA Samples
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <rocblas.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
//-----------------------------------------------------------------------------------
void NN_OnHost(float *activity, float *weights, int N)
{
int i, j;
float new_activity[N];
for (i = 0; i<N; i++) {
new_activity[i] = 0;
for (j = 0; j<N; j++) {
new_activity[i] += activity[j] * weights[(j*N) + i];
}
}
for (i = 0; i < N; i++) {
activity[i] = 1 / (1 + exp(-new_activity[i]));
}
}
//-----------------------------------------------------------------------------------
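// One thread per neuron, launched as a single block: each thread accumulates its weighted input,
// and the __syncthreads() barrier guarantees every dot product has finished reading the old
// activity vector before any thread overwrites it with the new sigmoid output.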
__global__ void NN_OnDevice(float *activity, float *weights, float *new_activity, int N)
{
int j, idx = threadIdx.x;
new_activity[idx] = 0;
for (j = 0; j<N; j++) {
new_activity[idx] += activity[j] * weights[(j*N) + idx];
}
__syncthreads();
activity[idx] = 1 / (1 + exp(-new_activity[idx]));
}
//-----------------------------------------------------------------------------------
int main(void)
{
hipSetDevice(0);
float *activity_h, *weights_h, *new_activity_h; // pointers to host memory
float *activity_d, *weights_d, *new_activity_d; // pointer to device memory
int i, j, N = 100;
size_t size = N * sizeof(float);
//timer stuff
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate arrays on host
activity_h = (float *)malloc(size);
new_activity_h = (float *)malloc(size);
weights_h = (float *)malloc(N * N * sizeof(float));
// allocate array on device
hipMalloc((void **)&activity_d, size);
hipMalloc((void **)&new_activity_d, size);
hipMalloc((void **)&weights_d, size*size);
// initialization of host data
for (i = 0; i<N; i++) {
activity_h[i] = (float(rand() % 100) / 100);
for (j = 0; j<N; j++) {
weights_h[(j*N) + i] = (float(rand() % 200) / 100) - 1;
//printf("%f ",weights_h[(j*N)+i]);
}
//printf("%f ",activity_h[i]);
}
//printf("\n");
// copy data from host to device
hipMemcpy(activity_d, activity_h, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(weights_d, weights_h, sizeof(float)*N*N, hipMemcpyHostToDevice);
// do calculation on host
NN_OnHost(activity_h, weights_h, N);
for (i = 0; i<10; i++) printf("%f ", activity_h[i]);
printf("\n");
//start timer
hipEventRecord(start, 0);
// do calculation on device:
hipLaunchKernelGGL(( NN_OnDevice) , dim3(1), dim3(N) , 0, 0, activity_d, weights_d, new_activity_d, N);
// block until the device has completed
hipDeviceSynchronize();
//end timer
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// Retrieve result from device and store in b_h
hipMemcpy(new_activity_h, activity_d, sizeof(float)*N, hipMemcpyDeviceToHost);
for (i = 0; i<10; i++) printf("%f ", new_activity_h[i]);
printf("\n");
printf("time = %f\n\n", elapsedTime);
// cleanup
free(activity_h); free(weights_h); free(new_activity_h);
hipFree(activity_d); hipFree(weights_d); hipFree(new_activity_d);
hipEventDestroy(start);
hipEventDestroy(stop);
} | 413c322cf8417e4689632c97524b4e9e53a7228e.cu | // incrementArray.cu
// Utilities and system includes
#include <assert.h>
#include <helper_string.h> // helper for shared functions common to CUDA Samples
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cublas_v2.h>
// CUDA and CUBLAS functions
#include <helper_functions.h>
#include <helper_cuda.h>
//-----------------------------------------------------------------------------------
void NN_OnHost(float *activity, float *weights, int N)
{
int i, j;
float new_activity[N];
for (i = 0; i<N; i++) {
new_activity[i] = 0;
for (j = 0; j<N; j++) {
new_activity[i] += activity[j] * weights[(j*N) + i];
}
}
for (i = 0; i < N; i++) {
activity[i] = 1 / (1 + exp(-new_activity[i]));
}
}
//-----------------------------------------------------------------------------------
__global__ void NN_OnDevice(float *activity, float *weights, float *new_activity, int N)
{
int j, idx = threadIdx.x;
new_activity[idx] = 0;
for (j = 0; j<N; j++) {
new_activity[idx] += activity[j] * weights[(j*N) + idx];
}
__syncthreads();
activity[idx] = 1 / (1 + exp(-new_activity[idx]));
}
//-----------------------------------------------------------------------------------
int main(void)
{
cudaSetDevice(0);
float *activity_h, *weights_h, *new_activity_h; // pointers to host memory
float *activity_d, *weights_d, *new_activity_d; // pointer to device memory
int i, j, N = 100;
size_t size = N * sizeof(float);
//timer stuff
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate arrays on host
activity_h = (float *)malloc(size);
new_activity_h = (float *)malloc(size);
weights_h = (float *)malloc(N * N * sizeof(float));
// allocate array on device
cudaMalloc((void **)&activity_d, size);
cudaMalloc((void **)&new_activity_d, size);
cudaMalloc((void **)&weights_d, size*size);
// initialization of host data
for (i = 0; i<N; i++) {
activity_h[i] = (float(rand() % 100) / 100);
for (j = 0; j<N; j++) {
weights_h[(j*N) + i] = (float(rand() % 200) / 100) - 1;
//printf("%f ",weights_h[(j*N)+i]);
}
//printf("%f ",activity_h[i]);
}
//printf("\n");
// copy data from host to device
cudaMemcpy(activity_d, activity_h, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(weights_d, weights_h, sizeof(float)*N*N, cudaMemcpyHostToDevice);
// do calculation on host
NN_OnHost(activity_h, weights_h, N);
for (i = 0; i<10; i++) printf("%f ", activity_h[i]);
printf("\n");
//start timer
cudaEventRecord(start, 0);
// do calculation on device:
NN_OnDevice <<< 1, N >>> (activity_d, weights_d, new_activity_d, N);
// block until the device has completed
cudaThreadSynchronize();
//end timer
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// Retrieve result from device and store in b_h
cudaMemcpy(new_activity_h, activity_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
for (i = 0; i<10; i++) printf("%f ", new_activity_h[i]);
printf("\n");
printf("time = %f\n\n", elapsedTime);
// cleanup
free(activity_h); free(weights_h); free(new_activity_h);
cudaFree(activity_d); cudaFree(weights_d); cudaFree(new_activity_d);
cudaEventDestroy(start);
cudaEventDestroy(stop);
} |
eb59c8caf029d8d040fa1834dc092accb00c373a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include <hipfft.h>
///// GPU kernels /////
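// ConjMult overwrites each ligand Fourier coefficient with conj(ligand_F) * potential_F
// (element-wise Fourier-domain correlation); the modulo on idx_p reuses the same potential
// grids across all ligand orientations. A minimal launch sketch — the block/grid sizes and the
// numOrientations name are illustrative assumptions, not taken from this file, and the total
// thread count must exactly match the element count because the kernel has no bounds check:
// dim3 block(512);
// dim3 grid((numOrientations * numOfGridsUsed * odist) / 512);
// hipLaunchKernelGGL(ConjMult, grid, block, 0, 0, d_potential_F, d_ligand_F, odist, numOfGridsUsed);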
__global__ void ConjMult(hipfftComplex *d_potential_F, hipfftComplex *d_ligand_F, int odist, int numOfGridsUsed)
{
int dist = odist * numOfGridsUsed;
int idx_l = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int idx_p = idx_l % dist;
float x = d_ligand_F[idx_l].x;
float y = d_ligand_F[idx_l].y;
d_ligand_F[idx_l].x = x * d_potential_F[idx_p].x + y * d_potential_F[idx_p].y;
d_ligand_F[idx_l].y = x * d_potential_F[idx_p].y - y * d_potential_F[idx_p].x;
};
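// SumGrids then folds the numOfGridsUsed per-grid products of each orientation into one
// complex grid and scales by 1/sqrt(idist) — presumably to match a 1/sqrt(N) FFT
// normalization convention used elsewhere in this project (assumption).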
__global__ void SumGrids(hipfftComplex *d_ligand_F, hipfftComplex *d_ligand_sum_F, int numOfGridsUsed, int odist, int idist)
{
int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int idxQuaternion = idx / odist;
int idxValue = idx % odist;
d_ligand_sum_F[idx].x = 0;
d_ligand_sum_F[idx].y = 0;
for(int i = 0; i < numOfGridsUsed; i++)
{
d_ligand_sum_F[idx].x += d_ligand_F[ (idxQuaternion*numOfGridsUsed + i) * odist + idxValue].x;
d_ligand_sum_F[idx].y += d_ligand_F[ (idxQuaternion*numOfGridsUsed + i) * odist + idxValue].y;
}
d_ligand_sum_F[idx].x = d_ligand_sum_F[idx].x / sqrt((float) idist);
d_ligand_sum_F[idx].y = d_ligand_sum_F[idx].y / sqrt((float) idist);
};
| eb59c8caf029d8d040fa1834dc092accb00c373a.cu | #include "kernel.h"
#include <cufft.h>
///// GPU kernels /////
__global__ void ConjMult(cufftComplex *d_potential_F, cufftComplex *d_ligand_F, int odist, int numOfGridsUsed)
{
int dist = odist * numOfGridsUsed;
int idx_l = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int idx_p = idx_l % dist;
float x = d_ligand_F[idx_l].x;
float y = d_ligand_F[idx_l].y;
d_ligand_F[idx_l].x = x * d_potential_F[idx_p].x + y * d_potential_F[idx_p].y;
d_ligand_F[idx_l].y = x * d_potential_F[idx_p].y - y * d_potential_F[idx_p].x;
};
__global__ void SumGrids(cufftComplex *d_ligand_F, cufftComplex *d_ligand_sum_F, int numOfGridsUsed, int odist, int idist)
{
int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
int idxQuaternion = idx / odist;
int idxValue = idx % odist;
d_ligand_sum_F[idx].x = 0;
d_ligand_sum_F[idx].y = 0;
for(int i = 0; i < numOfGridsUsed; i++)
{
d_ligand_sum_F[idx].x += d_ligand_F[ (idxQuaternion*numOfGridsUsed + i) * odist + idxValue].x;
d_ligand_sum_F[idx].y += d_ligand_F[ (idxQuaternion*numOfGridsUsed + i) * odist + idxValue].y;
}
d_ligand_sum_F[idx].x = d_ligand_sum_F[idx].x / sqrt((float) idist);
d_ligand_sum_F[idx].y = d_ligand_sum_F[idx].y / sqrt((float) idist);
};
|
e1bf5885985b77b355194d5293b1724489d42891.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
//************ global variables ***************
int msk=3, dimx=1040, dimy=1388, tam_imag=1388*1040;
// [i][j] = i*dimy+j
//************** Kernel CUDA *********************
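// Varianza estimates, for every pixel, the variance of its 3x3 neighbourhood. In the current
// state several of the nine neighbours are hard-coded constants and the boundary-aware
// gathering is commented out, so the result is not yet a true local variance of the input.
// The launch used in main() — grid (347,20) x block (13,16) — yields 4511*320 = 1,443,520
// threads, exactly the 1388*1040 pixel count, so the id < tam_imag guard excludes no threads.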
__global__ void Varianza (int *G_d, float *var_d){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idy = threadIdx.y + blockIdx.y*blockDim.y;
int id = idx + idy*blockDim.x*gridDim.x;
int M_d[9], i, dimx=1040, dimy=1388, tam_imag=1388*1040, msk=3;
float X=0.f,Xprom=0.f,Y=0.f;
var_d[id]=0;
//printf("prueba\n");
if(id<tam_imag){
//M_d[0]=((i<1 || j<1) ? 0:A[i-1][j-1]);
/*
M_d[0]=((idx<1 || idy<1) ? 0:G_d[(idx-1)+(idy-1)*blockDim.x*gridDim.x]);
M_d[1]=((idx<1) ? 0:G_d[(idx-1)+(idy)*blockDim.x*gridDim.x]);
M_d[2]=((idx<1 || idy>dimy-2) ? 0:G_d[(idx-1)+(idy+1)*blockDim.x*gridDim.x]);
M_d[3]=((idy<1) ? 0:G_d[(idx)+(idy-1)*blockDim.x*gridDim.x]);
M_d[4]=G_d[(idx)+(idy)*blockDim.x*gridDim.x];
M_d[5]=((idy>dimy-2) ? 0:G_d[(idx)+(idy+1)*blockDim.x*gridDim.x]);
M_d[6]=((idx>dimx-2 || idy<1) ? 0:G_d[(idx+1)+(idy-1)*blockDim.x*gridDim.x]);
M_d[7]=((idx>dimx-2) ? 0:G_d[(idx+1)+(idy)*blockDim.x*gridDim.x]);
M_d[8]=((idx>dimx-2 || idy>dimy-1) ? 0:G_d[(idx+1)+(idy+1)*blockDim.x*gridDim.x]);
*/
if (idx==0 || idy==0){
M_d[0]=0;
}else{
M_d[0]=G_d[id-1-dimy];
}
/*
if ((idx==0)){
M_d[1]=0;
}else{
M_d[1]=G_d[id-dimy];
//M_d[1]=8;
}
/*
if (idx==0 || idy==dimy){
M_d[2]=0;
}else{
M_d[2]=G_d[id+1-dimy];
}
*/
if (idy==0){
M_d[3]=0;
}else{
M_d[3]=G_d[id-1];
}
M_d[4]=G_d[id];
if (idy==dimy){
M_d[5]=0;
}else{
M_d[5]=G_d[id+1];
}
/*
if (id==dimx || idy==0){
M_d[6]=0;
}else{
M_d[6]=G_d[id-1+dimy];
}
*//*
if (idx==dimx){
M_d[7]=0;
}else{
M_d[7]=G_d[id+dimy];
}
*//*
if (idx==dimx || idy==dimy){
M_d[8]=0;
}else{
M_d[8]=G_d[id+1+dimy];
}
*/
//M_d[0]=1;
M_d[1]=5;
M_d[2]=8;
//M_d[3]=1;
//M_d[4]=1;
//M_d[5]=1;
M_d[6]=2;
M_d[7]=5;
M_d[8]=4;
for(i=0;i<msk*msk;i++)
X+=M_d[i];
Xprom=((float)X)/(msk*msk);
for(i=0;i<msk*msk;i++)
Y+=(Xprom-M_d[i])*(Xprom-M_d[i]);
var_d[id]=Y/(msk*msk);
}
}
//***************** main function **********************
int main(int argc,char* argv[]){
//*************** Variable declarations **************
int i,j,init,fin,d;
init=atoi(argv[1]);
fin=atoi(argv[2]);
//init=1;
//fin=328;
FILE *matrizR, *matrizG, *matrizB;
float t;
clock_t tinicio, t_GPU;
tinicio=clock();
int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h;
float *max_h, *var_h;
int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d;
float *max_d, *var_d;
//************ Initialization of variables on the host and on the device ***************
/* // MATRIX-style declaration
max_h=(float **)malloc(sizeof(float)*dimx);
topof_h=(int **)malloc(sizeof(int)*dimx);
R_h=(int **)malloc(sizeof(int)*dimx);
G_h=(int **)malloc(sizeof(int)*dimx);
B_h=(int **)malloc(sizeof(int)*dimx);
Rf_h=(int **)malloc(sizeof(int)*dimx);
Gf_h=(int **)malloc(sizeof(int)*dimx);
Bf_h=(int **)malloc(sizeof(int)*dimx);
for(i=0;i<dimx;i++){
max_h[i]=(float*)malloc(sizeof(float)*dimy);
topof_h[i]=(int*)malloc(sizeof(int)*dimy);
R_h[i]=(int*)malloc(sizeof(int)*dimy);
G_h[i]=(int*)malloc(sizeof(int)*dimy);
B_h[i]=(int*)malloc(sizeof(int)*dimy);
Rf_h[i]=(int*)malloc(sizeof(int)*dimy);
Gf_h[i]=(int*)malloc(sizeof(int)*dimy);
Bf_h[i]=(int*)malloc(sizeof(int)*dimy);
}
var_h=(float *)malloc(sizeof(float)*tam_imag);
*/
R_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&R_d, tam_imag*sizeof(int));
G_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&G_d, tam_imag*sizeof(int));
B_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&B_d, tam_imag*sizeof(int));
Rf_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&Rf_d, tam_imag*sizeof(int));
Gf_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&Gf_d, tam_imag*sizeof(int));
Bf_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&Bf_d, tam_imag*sizeof(int));
topof_h=(int *)malloc(sizeof(int)*tam_imag);
hipMalloc((void**)&topof_d, tam_imag*sizeof(int));
max_h=(float *)malloc(sizeof(float)*tam_imag);
hipMalloc((void**)&max_d, tam_imag*sizeof(float));
var_h=(float *)malloc(sizeof(float)*tam_imag);
hipMalloc((void**)&var_d,tam_imag*sizeof(float));
//*************** EDF computation loop ****************
for(d=init;d<=fin;d++){
printf("d=%d \n", d);
//***************** Read the RGB matrices on the host ****************
char rutaR[32]; // large enough for "RGB/<d>/R"
sprintf(rutaR, "%s%d%s","RGB/",d,"/R");
matrizR=fopen(rutaR,"r+");
char rutaG[32];
sprintf(rutaG, "%s%d%s","RGB/",d,"/G");
matrizG=fopen(rutaG,"r+");
char rutaB[32];
sprintf(rutaB, "%s%d%s","RGB/",d,"/B");
matrizB=fopen(rutaB,"r+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fscanf(matrizR, "%d", &R_h[i*dimy+j]);
fscanf(matrizG, "%d", &G_h[i*dimy+j]);
fscanf(matrizB, "%d", &B_h[i*dimy+j]);
}
}
fclose(matrizR);
fclose(matrizG);
fclose(matrizB);
//***************** Kernel Varianza *******************
hipMemcpy(G_d,G_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
dim3 Grid(347,20);
dim3 Block(13,16);
hipLaunchKernelGGL(( Varianza), dim3(Grid),dim3(Block), 0, 0, B_d,var_d);
printf("Despues de kernel \n");
hipMemcpy(var_h,var_d,sizeof(float)*tam_imag,hipMemcpyDeviceToHost);
printf("Despues de resultado a host \n");
//***************** Kernel Varianza *******************
/*
hipMemcpy(R_d,R_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(G_d,G_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(B_d,B_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(Rf_d,Rf_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(Gf_d,Gf_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(Bf_d,Bf_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(topof_d,topof_h,sizeof(int)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(max_d,max_h,sizeof(float)*tam_imag,hipMemcpyHostToDevice);
hipMemcpy(var_d,var_h,sizeof(float)*tam_imag,hipMemcpyHostToDevice);
dim3 Grid(347,20);
dim3 Block(13,16);
TopoRGB<<<Grid,Block>>>(R_d,G_d,B_d,Rf_d,Gf_d,Bf_d,topof_d,max_d,var_d);
hipMemcpy(Rf_h,Rf_d,sizeof(int)*tam_imag,hipMemcpyDeviceToHost);
hipMemcpy(Gf_h,Gf_d,sizeof(int)*tam_imag,hipMemcpyDeviceToHost);
hipMemcpy(Bf_h,Bf_d,sizeof(int)*tam_imag,hipMemcpyDeviceToHost);
hipMemcpy(topof_h,topof_d,sizeof(int)*tam_imag,hipMemcpyDeviceToHost);
hipMemcpy(max_h,max_d,sizeof(float)*tam_imag,hipMemcpyDeviceToHost);
*/
//********************* Calculation of everything ********************
} // End of the EDF computation loop
printf("***Sale del for \n");
/*
// ***************** Generation of result files ************************
FILE *archTopo, *archR, *archG, *archB;
archTopo=fopen("Resultados/topos10","w+");
archR=fopen("Resultados/R10","w+");
archG=fopen("Resultados/G10","w+");
archB=fopen("Resultados/B10","w+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fprintf(archTopo,"%d ",topof_h[i*dimy+j]);
fprintf(archR,"%d ",Rf_h[i*dimy+j]);
fprintf(archG,"%d ",Gf_h[i*dimy+j]);
fprintf(archB,"%d ",Bf_h[i*dimy+j]);
}
fprintf(archTopo,"\n");
fprintf(archR,"\n");
fprintf(archG,"\n");
fprintf(archB,"\n");
}
fclose(archTopo);
fclose(archR);
fclose(archG);
fclose(archB);
*/
//***************** Final variance file
FILE *archVar;
archVar=fopen("Resultados/VarUltima","w+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fprintf(archVar,"%f ",var_h[i*dimy+j]);
}
fprintf(archVar,"\n");
}
fclose(archVar);
free(var_h);
free(max_h);
free(topof_h);
free(R_h);
free(G_h);
free(B_h);
free(Rf_h);
free(Gf_h);
free(Bf_h);
hipFree(var_d);
hipFree(max_d);
hipFree(topof_d);
hipFree(R_d);
hipFree(G_d);
hipFree(B_d);
hipFree(Rf_d);
hipFree(Gf_d);
hipFree(Bf_d);
t_GPU=clock();
t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
printf("\ntiempo de procesamiento de varianzas: %6.3fs\n",t);
//getchar ();
return 0;
}// END of main()
| e1bf5885985b77b355194d5293b1724489d42891.cu |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
//************ global variables ***************
int msk=3, dimx=1040, dimy=1388, tam_imag=1388*1040;
// [i][j] = i*dimy+j
//************** Kernel CUDA *********************
__global__ void Varianza (int *G_d, float *var_d){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int idy = threadIdx.y + blockIdx.y*blockDim.y;
int id = idx + idy*blockDim.x*gridDim.x;
int M_d[9], i, dimx=1040, dimy=1388, tam_imag=1388*1040, msk=3;
float X=0.f,Xprom=0.f,Y=0.f;
var_d[id]=0;
//printf("prueba\n");
if(id<tam_imag){
//M_d[0]=((i<1 || j<1) ? 0:A[i-1][j-1]);
/*
M_d[0]=((idx<1 || idy<1) ? 0:G_d[(idx-1)+(idy-1)*blockDim.x*gridDim.x]);
M_d[1]=((idx<1) ? 0:G_d[(idx-1)+(idy)*blockDim.x*gridDim.x]);
M_d[2]=((idx<1 || idy>dimy-2) ? 0:G_d[(idx-1)+(idy+1)*blockDim.x*gridDim.x]);
M_d[3]=((idy<1) ? 0:G_d[(idx)+(idy-1)*blockDim.x*gridDim.x]);
M_d[4]=G_d[(idx)+(idy)*blockDim.x*gridDim.x];
M_d[5]=((idy>dimy-2) ? 0:G_d[(idx)+(idy+1)*blockDim.x*gridDim.x]);
M_d[6]=((idx>dimx-2 || idy<1) ? 0:G_d[(idx+1)+(idy-1)*blockDim.x*gridDim.x]);
M_d[7]=((idx>dimx-2) ? 0:G_d[(idx+1)+(idy)*blockDim.x*gridDim.x]);
M_d[8]=((idx>dimx-2 || idy>dimy-1) ? 0:G_d[(idx+1)+(idy+1)*blockDim.x*gridDim.x]);
*/
if (idx==0 || idy==0){
M_d[0]=0;
}else{
M_d[0]=G_d[id-1-dimy];
}
/*
if ((idx==0)){
M_d[1]=0;
}else{
M_d[1]=G_d[id-dimy];
//M_d[1]=8;
}
/*
if (idx==0 || idy==dimy){
M_d[2]=0;
}else{
M_d[2]=G_d[id+1-dimy];
}
*/
if (idy==0){
M_d[3]=0;
}else{
M_d[3]=G_d[id-1];
}
M_d[4]=G_d[id];
if (idy==dimy){
M_d[5]=0;
}else{
M_d[5]=G_d[id+1];
}
/*
if (id==dimx || idy==0){
M_d[6]=0;
}else{
M_d[6]=G_d[id-1+dimy];
}
*//*
if (idx==dimx){
M_d[7]=0;
}else{
M_d[7]=G_d[id+dimy];
}
*//*
if (idx==dimx || idy==dimy){
M_d[8]=0;
}else{
M_d[8]=G_d[id+1+dimy];
}
*/
//M_d[0]=1;
M_d[1]=5;
M_d[2]=8;
//M_d[3]=1;
//M_d[4]=1;
//M_d[5]=1;
M_d[6]=2;
M_d[7]=5;
M_d[8]=4;
for(i=0;i<msk*msk;i++)
X+=M_d[i];
Xprom=((float)X)/(msk*msk);
for(i=0;i<msk*msk;i++)
Y+=(Xprom-M_d[i])*(Xprom-M_d[i]);
var_d[id]=Y/(msk*msk);
}
}
//***************** main function **********************
int main(int argc,char* argv[]){
//*************** Variable declarations **************
int i,j,init,fin,d;
init=atoi(argv[1]);
fin=atoi(argv[2]);
//init=1;
//fin=328;
FILE *matrizR, *matrizG, *matrizB;
float t;
clock_t tinicio, t_GPU;
tinicio=clock();
int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h;
float *max_h, *var_h;
int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d;
float *max_d, *var_d;
//************ Initialization of variables on the host and on the device ***************
/* // MATRIX-style declaration
max_h=(float **)malloc(sizeof(float)*dimx);
topof_h=(int **)malloc(sizeof(int)*dimx);
R_h=(int **)malloc(sizeof(int)*dimx);
G_h=(int **)malloc(sizeof(int)*dimx);
B_h=(int **)malloc(sizeof(int)*dimx);
Rf_h=(int **)malloc(sizeof(int)*dimx);
Gf_h=(int **)malloc(sizeof(int)*dimx);
Bf_h=(int **)malloc(sizeof(int)*dimx);
for(i=0;i<dimx;i++){
max_h[i]=(float*)malloc(sizeof(float)*dimy);
topof_h[i]=(int*)malloc(sizeof(int)*dimy);
R_h[i]=(int*)malloc(sizeof(int)*dimy);
G_h[i]=(int*)malloc(sizeof(int)*dimy);
B_h[i]=(int*)malloc(sizeof(int)*dimy);
Rf_h[i]=(int*)malloc(sizeof(int)*dimy);
Gf_h[i]=(int*)malloc(sizeof(int)*dimy);
Bf_h[i]=(int*)malloc(sizeof(int)*dimy);
}
var_h=(float *)malloc(sizeof(float)*tam_imag);
*/
R_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&R_d, tam_imag*sizeof(int));
G_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&G_d, tam_imag*sizeof(int));
B_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&B_d, tam_imag*sizeof(int));
Rf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Rf_d, tam_imag*sizeof(int));
Gf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Gf_d, tam_imag*sizeof(int));
Bf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Bf_d, tam_imag*sizeof(int));
topof_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&topof_d, tam_imag*sizeof(int));
max_h=(float *)malloc(sizeof(float)*tam_imag);
cudaMalloc((void**)&max_d, tam_imag*sizeof(float));
var_h=(float *)malloc(sizeof(float)*tam_imag);
cudaMalloc((void**)&var_d,tam_imag*sizeof(float));
//*************** EDF computation loop ****************
for(d=init;d<=fin;d++){
printf("d=%d \n", d);
//***************** Read the RGB matrices on the host ****************
char rutaR[32]; // large enough for "RGB/<d>/R"
sprintf(rutaR, "%s%d%s","RGB/",d,"/R");
matrizR=fopen(rutaR,"r+");
char rutaG[32];
sprintf(rutaG, "%s%d%s","RGB/",d,"/G");
matrizG=fopen(rutaG,"r+");
char rutaB[32];
sprintf(rutaB, "%s%d%s","RGB/",d,"/B");
matrizB=fopen(rutaB,"r+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fscanf(matrizR, "%d", &R_h[i*dimy+j]);
fscanf(matrizG, "%d", &G_h[i*dimy+j]);
fscanf(matrizB, "%d", &B_h[i*dimy+j]);
}
}
fclose(matrizR);
fclose(matrizG);
fclose(matrizB);
//***************** Kernel Varianza *******************
cudaMemcpy(G_d,G_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
dim3 Grid(347,20);
dim3 Block(13,16);
Varianza<<<Grid,Block>>>(B_d,var_d);
printf("Despues de kernel \n");
cudaMemcpy(var_h,var_d,sizeof(float)*tam_imag,cudaMemcpyDeviceToHost);
printf("Despues de resultado a host \n");
//***************** Kernel Varianza *******************
/*
cudaMemcpy(R_d,R_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(G_d,G_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(B_d,B_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Rf_d,Rf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Gf_d,Gf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Bf_d,Bf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(topof_d,topof_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(max_d,max_h,sizeof(float)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(var_d,var_h,sizeof(float)*tam_imag,cudaMemcpyHostToDevice);
dim3 Grid(347,20);
dim3 Block(13,16);
TopoRGB<<<Grid,Block>>>(R_d,G_d,B_d,Rf_d,Gf_d,Bf_d,topof_d,max_d,var_d);
cudaMemcpy(Rf_h,Rf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(Gf_h,Gf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(Bf_h,Bf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(topof_h,topof_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(max_h,max_d,sizeof(float)*tam_imag,cudaMemcpyDeviceToHost);
*/
//********************* Calculation of everything ********************
} // End of the EDF computation loop
printf("***Sale del for \n");
/*
// ***************** Generation of result files ************************
FILE *archTopo, *archR, *archG, *archB;
archTopo=fopen("Resultados/topos10","w+");
archR=fopen("Resultados/R10","w+");
archG=fopen("Resultados/G10","w+");
archB=fopen("Resultados/B10","w+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fprintf(archTopo,"%d ",topof_h[i*dimy+j]);
fprintf(archR,"%d ",Rf_h[i*dimy+j]);
fprintf(archG,"%d ",Gf_h[i*dimy+j]);
fprintf(archB,"%d ",Bf_h[i*dimy+j]);
}
fprintf(archTopo,"\n");
fprintf(archR,"\n");
fprintf(archG,"\n");
fprintf(archB,"\n");
}
fclose(archTopo);
fclose(archR);
fclose(archG);
fclose(archB);
*/
//***************** Final variance file
FILE *archVar;
archVar=fopen("Resultados/VarUltima","w+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fprintf(archVar,"%f ",var_h[i*dimy+j]);
}
fprintf(archVar,"\n");
}
fclose(archVar);
free(var_h);
free(max_h);
free(topof_h);
free(R_h);
free(G_h);
free(B_h);
free(Rf_h);
free(Gf_h);
free(Bf_h);
cudaFree(var_d);
cudaFree(max_d);
cudaFree(topof_d);
cudaFree(R_d);
cudaFree(G_d);
cudaFree(B_d);
cudaFree(Rf_d);
cudaFree(Gf_d);
cudaFree(Bf_d);
t_GPU=clock();
t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
printf("\ntiempo de procesamiento de varianzas: %6.3fs\n",t);
//getchar ();
return 0;
}// END of main()
|
4c59aa0784077e5c116bb824cf003748465a09e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}
__global__ void uplo_set (const int sd, const int unit, const int bottom, const REAL alpha, REAL* a, const int offset_a, const int ld_a) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
a[offset_a + gid_0 + gid_1 * ld_a] = alpha;
}
} | 4c59aa0784077e5c116bb824cf003748465a09e6.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
}
__global__ void uplo_set (const int sd, const int unit, const int bottom, const REAL alpha, REAL* a, const int offset_a, const int ld_a) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
a[offset_a + gid_0 + gid_1 * ld_a] = alpha;
}
} |
6b1890d61bb05d7df2d7ad08dab092edb886d220.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* DepthConcatLayer.cpp
*
* Created on: 2016. 5. 25.
* Author: jhkim
*/
#include "DepthConcatLayer.h"
using namespace std;
//#define DEPTHCONCAT_LOG
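// Concat copies one input blob into (forward) or out of (backward) its channel slice of the
// concatenated blob: each of num_concats samples contributes bottom_concat_axis channels of
// concat_size elements, re-based at (sample * top_concat_axis + offset_concat_axis) * concat_size.
// For example, concatenating inputs of 64 and 32 channels (top_concat_axis = 96), element
// (n, c, hw) of the second input lands at (n*96 + 64 + c) * concat_size + hw in the output.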
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
DepthConcatLayer<Dtype>::DepthConcatLayer(Builder* builder)
: Layer<Dtype>(builder) {
initialize();
}
template <typename Dtype>
DepthConcatLayer<Dtype>::~DepthConcatLayer() {}
template <typename Dtype>
void DepthConcatLayer<Dtype>::initialize() {
this->type = Layer<Dtype>::DepthConcat;
this->concatAxis = 1;
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::reshape() {
Layer<Dtype>::_adjustInputShape();
// shape
bool inputShapeReshaped = false;
const uint32_t inputSize = this->_inputData.size();
for (uint32_t i = 0; i < inputSize; i++) {
if (Layer<Dtype>::_isInputShapeChanged(i)) {
inputShapeReshaped = true;
this->_inputShape[i] = this->_inputData[i]->getShape();
}
}
if (!inputShapeReshaped) {
return;
}
uint32_t batches = this->_inputShape[0][0];
uint32_t channels = 0;
uint32_t rows = this->_inputShape[0][2];
uint32_t cols = this->_inputShape[0][3];
for (uint32_t i = 0; i < this->_inputData.size(); i++) {
channels += this->_inputData[i]->getShape()[this->concatAxis];
}
this->_outputData[0]->reshape({batches, channels, rows, cols});
this->concatInputSize = this->_inputData[0]->getCountByAxis(this->concatAxis + 1);
this->numConcats = this->_inputData[0]->getCountByAxis(0, this->concatAxis);
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward() {
reshape();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
int offsetConcatAxis = 0;
const int outputConcatAxis = this->_outputData[0]->getShape()[this->concatAxis];
const bool kForward = true;
for (int i = 0; i < this->_inputData.size(); i++) {
const Dtype* inputData = this->_inputData[i]->device_data();
const int inputConcatAxis = this->_inputData[i]->getShape()[this->concatAxis];
const int inputConcatSize = inputConcatAxis * this->concatInputSize;
const int nThreads = inputConcatSize * this->numConcats;
hipLaunchKernelGGL(( Concat<Dtype>), dim3(SOOOA_GET_BLOCKS(nThreads)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
nThreads, inputData, kForward, this->numConcats, this->concatInputSize,
outputConcatAxis, inputConcatAxis, offsetConcatAxis, outputData);
offsetConcatAxis += inputConcatAxis;
}
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation() {
const Dtype* outputGrad = this->_outputData[0]->device_grad();
int offsetConcatAxis = 0;
const int outputConcatAxis = this->_outputData[0]->getShape()[this->concatAxis];
const bool kForward = false;
for (int i = 0; i < this->_inputData.size(); i++) {
const int inputConcatAxis = this->_inputData[i]->getShape(this->concatAxis);
if (this->_propDown[i]) {
Dtype* inputGrad = this->_inputData[i]->mutable_device_grad();
const int inputConcatSize = inputConcatAxis * this->concatInputSize;
const int nThreads = inputConcatSize * this->numConcats;
hipLaunchKernelGGL(( Concat<Dtype>), dim3(SOOOA_GET_BLOCKS(nThreads)), dim3(SOOOA_CUDA_NUM_THREADS), 0, 0,
nThreads, outputGrad, kForward, this->numConcats, this->concatInputSize,
outputConcatAxis, inputConcatAxis, offsetConcatAxis, inputGrad);
}
offsetConcatAxis += inputConcatAxis;
}
}
/*
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward() {
reshape();
uint32_t batchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
batchOffset += this->_inputData[i]->getCountByAxis(1);
}
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
const uint32_t batchSize = this->_inputData[0]->getShape()[0];
uint32_t inBatchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
const Dtype* d_inputData = this->_inputData[i]->device_data();
const uint32_t inputCountByChannel = this->_inputData[i]->getCountByAxis(1);
if (i > 0) {
inBatchOffset += this->_inputData[i-1]->getCountByAxis(1);
}
for (uint32_t j = 0; j < batchSize; j++) {
checkCudaErrors(hipMemcpyAsync(
d_outputData+batchOffset*j+inBatchOffset,
d_inputData+inputCountByChannel*j,
inputCountByChannel,
hipMemcpyDeviceToDevice));
}
}
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation() {
uint32_t batchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
batchOffset += this->_inputData[i]->getCountByAxis(1);
}
const Dtype* d_outputData = this->_outputData[0]->device_data();
const uint32_t batchSize = this->_inputData[0]->getShape()[0];
uint32_t inBatchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
Dtype* d_inputData = this->_inputData[i]->mutable_device_data();
const uint32_t inputCountByChannel = this->_inputData[i]->getCountByAxis(1);
if (i > 0) {
inBatchOffset += this->_inputData[i-1]->getCountByAxis(1);
}
for (uint32_t j = 0; j < batchSize; j++) {
checkCudaErrors(hipMemcpyAsync(
d_inputData+inputCountByChannel*j,
d_outputData+batchOffset*j+inBatchOffset,
inputCountByChannel,
hipMemcpyDeviceToDevice));
}
}
}
*/
#ifndef GPU_MODE
template <typename Dtype>
void DepthConcatLayer<Dtype>::initialize() {
this->type = Layer<Dtype>::DepthConcat;
this->offsetIndex = 0;
this->input.reset();
this->delta_input.set_size(size(output));
this->delta_input.zeros();
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward(uint32_t idx, const rcube &input,
const char *end=0) {
this->input = join_slices(this->input, input);
Util::printCube(this->input, "input:");
this->offsets.push_back(this->input.n_slices);
if(!isLastPrevLayerRequest(idx)) return;
this->output = this->input;
propFeedforward(this->output, end);
	// input is not used in the backward pass, so it can be reset here
this->input.reset();
this->offsetIndex = 0;
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation(uint32_t idx, Layer<Dtype>* next_layer) {
Util::printCube(delta_input, "delta_input:");
rcube w_next_delta(size(delta_input));
Util::convertCube(next_layer->getDeltaInput(), delta_input);
delta_input += w_next_delta;
// delta_input = join_slices(this->delta_input, next_layer->getDeltaInput());
if(!isLastNextLayerRequest(idx)) return;
propBackpropagation();
this->delta_input.zeros();
}
#endif
template class DepthConcatLayer<float>;
| 6b1890d61bb05d7df2d7ad08dab092edb886d220.cu | /*
* DepthConcatLayer.cpp
*
* Created on: 2016. 5. 25.
* Author: jhkim
*/
#include "DepthConcatLayer.h"
using namespace std;
//#define DEPTHCONCAT_LOG
template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
const bool forward, const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
if (forward) {
out_data[top_index] = in_data[index];
} else {
out_data[index] = in_data[top_index];
}
}
}
template <typename Dtype>
DepthConcatLayer<Dtype>::DepthConcatLayer(Builder* builder)
: Layer<Dtype>(builder) {
initialize();
}
template <typename Dtype>
DepthConcatLayer<Dtype>::~DepthConcatLayer() {}
template <typename Dtype>
void DepthConcatLayer<Dtype>::initialize() {
this->type = Layer<Dtype>::DepthConcat;
this->concatAxis = 1;
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::reshape() {
Layer<Dtype>::_adjustInputShape();
	// Check whether any of the input data shapes have changed
bool inputShapeReshaped = false;
const uint32_t inputSize = this->_inputData.size();
for (uint32_t i = 0; i < inputSize; i++) {
if (Layer<Dtype>::_isInputShapeChanged(i)) {
inputShapeReshaped = true;
this->_inputShape[i] = this->_inputData[i]->getShape();
}
}
if (!inputShapeReshaped) {
return;
}
uint32_t batches = this->_inputShape[0][0];
uint32_t channels = 0;
uint32_t rows = this->_inputShape[0][2];
uint32_t cols = this->_inputShape[0][3];
for (uint32_t i = 0; i < this->_inputData.size(); i++) {
channels += this->_inputData[i]->getShape()[this->concatAxis];
}
this->_outputData[0]->reshape({batches, channels, rows, cols});
this->concatInputSize = this->_inputData[0]->getCountByAxis(this->concatAxis + 1);
this->numConcats = this->_inputData[0]->getCountByAxis(0, this->concatAxis);
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward() {
reshape();
Dtype* outputData = this->_outputData[0]->mutable_device_data();
int offsetConcatAxis = 0;
const int outputConcatAxis = this->_outputData[0]->getShape()[this->concatAxis];
const bool kForward = true;
for (int i = 0; i < this->_inputData.size(); i++) {
const Dtype* inputData = this->_inputData[i]->device_data();
const int inputConcatAxis = this->_inputData[i]->getShape()[this->concatAxis];
const int inputConcatSize = inputConcatAxis * this->concatInputSize;
const int nThreads = inputConcatSize * this->numConcats;
Concat<Dtype><<<SOOOA_GET_BLOCKS(nThreads), SOOOA_CUDA_NUM_THREADS>>>(
nThreads, inputData, kForward, this->numConcats, this->concatInputSize,
outputConcatAxis, inputConcatAxis, offsetConcatAxis, outputData);
offsetConcatAxis += inputConcatAxis;
}
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation() {
const Dtype* outputGrad = this->_outputData[0]->device_grad();
int offsetConcatAxis = 0;
const int outputConcatAxis = this->_outputData[0]->getShape()[this->concatAxis];
const bool kForward = false;
for (int i = 0; i < this->_inputData.size(); i++) {
const int inputConcatAxis = this->_inputData[i]->getShape(this->concatAxis);
if (this->_propDown[i]) {
Dtype* inputGrad = this->_inputData[i]->mutable_device_grad();
const int inputConcatSize = inputConcatAxis * this->concatInputSize;
const int nThreads = inputConcatSize * this->numConcats;
Concat<Dtype><<<SOOOA_GET_BLOCKS(nThreads), SOOOA_CUDA_NUM_THREADS>>>(
nThreads, outputGrad, kForward, this->numConcats, this->concatInputSize,
outputConcatAxis, inputConcatAxis, offsetConcatAxis, inputGrad);
}
offsetConcatAxis += inputConcatAxis;
}
}
/*
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward() {
reshape();
uint32_t batchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
batchOffset += this->_inputData[i]->getCountByAxis(1);
}
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
const uint32_t batchSize = this->_inputData[0]->getShape()[0];
uint32_t inBatchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
const Dtype* d_inputData = this->_inputData[i]->device_data();
const uint32_t inputCountByChannel = this->_inputData[i]->getCountByAxis(1);
if (i > 0) {
inBatchOffset += this->_inputData[i-1]->getCountByAxis(1);
}
for (uint32_t j = 0; j < batchSize; j++) {
checkCudaErrors(cudaMemcpyAsync(
d_outputData+batchOffset*j+inBatchOffset,
d_inputData+inputCountByChannel*j,
inputCountByChannel,
cudaMemcpyDeviceToDevice));
}
}
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation() {
uint32_t batchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
batchOffset += this->_inputData[i]->getCountByAxis(1);
}
const Dtype* d_outputData = this->_outputData[0]->device_data();
const uint32_t batchSize = this->_inputData[0]->getShape()[0];
uint32_t inBatchOffset = 0;
for (uint32_t i = 0; i < this->_inputs.size(); i++) {
Dtype* d_inputData = this->_inputData[i]->mutable_device_data();
const uint32_t inputCountByChannel = this->_inputData[i]->getCountByAxis(1);
if (i > 0) {
inBatchOffset += this->_inputData[i-1]->getCountByAxis(1);
}
for (uint32_t j = 0; j < batchSize; j++) {
checkCudaErrors(cudaMemcpyAsync(
d_inputData+inputCountByChannel*j,
d_outputData+batchOffset*j+inBatchOffset,
inputCountByChannel,
cudaMemcpyDeviceToDevice));
}
}
}
*/
#ifndef GPU_MODE
template <typename Dtype>
void DepthConcatLayer<Dtype>::initialize() {
this->type = Layer<Dtype>::DepthConcat;
this->offsetIndex = 0;
this->input.reset();
this->delta_input.set_size(size(output));
this->delta_input.zeros();
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::feedforward(uint32_t idx, const rcube &input,
const char *end=0) {
this->input = join_slices(this->input, input);
Util::printCube(this->input, "input:");
this->offsets.push_back(this->input.n_slices);
if(!isLastPrevLayerRequest(idx)) return;
this->output = this->input;
propFeedforward(this->output, end);
	// input is not used in the backward pass, so it can be reset here
this->input.reset();
this->offsetIndex = 0;
}
template <typename Dtype>
void DepthConcatLayer<Dtype>::backpropagation(uint32_t idx, Layer<Dtype>* next_layer) {
Util::printCube(delta_input, "delta_input:");
rcube w_next_delta(size(delta_input));
Util::convertCube(next_layer->getDeltaInput(), delta_input);
delta_input += w_next_delta;
// delta_input = join_slices(this->delta_input, next_layer->getDeltaInput());
if(!isLastNextLayerRequest(idx)) return;
propBackpropagation();
this->delta_input.zeros();
}
#endif
template class DepthConcatLayer<float>;
|
1e41a86de23e5637cc3637e290ca94322a8a491e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if !defined(CPU_ONLY)
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "syncedmem.hpp"
#include "math_functions.hpp"
#define kCutoff 1e-15
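// LogSum_device computes log(exp(log_a) + exp(log_b)) via the stable log-sum-exp form
// max(a,b) + log(1 + exp(min(a,b) - max(a,b))), so the exponential never overflows;
// LogSumVec_device folds it over a vector and Softmax_device uses it to normalize in log space.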
__device__ float LogSum_device(float log_a, float log_b) {
return (log_a < log_b) ? log_b + logf(1 + expf(log_a - log_b)) :
log_a + logf(1 + expf(log_b - log_a));
}
__device__ float LogSumVec_device(const float *logvec, size_t size) {
float sum = 0.;
sum = logvec[0];
for (uint i = 1; i < size; ++i) {
sum = LogSum_device(sum, logvec[i]);
}
return sum;
}
__device__ void Softmax_device(float *vec, size_t size) {
// TODO(wdai): Figure out why this is necessary. Doubt it is.
for (uint i = 0; i < size; ++i) {
if (abs(vec[i]) < kCutoff) {
vec[i] = kCutoff;
}
}
float lsum = LogSumVec_device(vec, size);
for (uint i = 0; i < size; ++i) {
vec[i] = expf(vec[i] - lsum);
//(*vec)[i] = exp((*vec)[i] - lsum);
vec[i] = vec[i] > 1 ? 1. : vec[i];
}
}
__global__ void SoftmaxBatchAndAdjust_kernel(
size_t n, size_t size, float *vecs, const uint *labels) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
vec[labels[i]] -= 1.; // See Bishop PRML (2006) Eq. (4.109)
}
}
void SoftmaxBatchAndAdjust_gpu(
hipStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels) {
hipLaunchKernelGGL(( SoftmaxBatchAndAdjust_kernel)
, dim3(caffe::CAFFE_GET_BLOCKS(n)), dim3(caffe::CAFFE_CUDA_NUM_THREADS),
0, cuda_stream,
n, size, vecs, labels);
}
__global__ void SoftmaxBatchAndEntropyLoss_kernel(
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
losses[i] = -logf(vec[labels[i]]);
}
}
void SoftmaxBatchAndEntropyLoss_gpu(
hipStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
hipLaunchKernelGGL(( SoftmaxBatchAndEntropyLoss_kernel)
, dim3(caffe::CAFFE_GET_BLOCKS(n)), dim3(caffe::CAFFE_CUDA_NUM_THREADS),
0, cuda_stream,
n, size, vecs, labels, losses);
}
__device__ float ZeroOneLoss(size_t size, float *vec, uint label) {
uint max_idx = 0;
float max_val = vec[0];
for (uint i = 1; i < size; i++) {
if (vec[i] > max_val) {
max_val = vec[i];
max_idx = i;
}
}
return (max_idx == label) ? 0 : 1;
}
__global__ void SoftmaxBatchAndZeroOneLoss_kernel(
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
losses[i] = ZeroOneLoss(size, vec, labels[i]);
}
}
void SoftmaxBatchAndZeroOneLoss_gpu(
hipStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
hipLaunchKernelGGL(( SoftmaxBatchAndZeroOneLoss_kernel)
, dim3(caffe::CAFFE_GET_BLOCKS(n)), dim3(caffe::CAFFE_CUDA_NUM_THREADS),
0, cuda_stream,
n, size, vecs, labels, losses);
}
__global__ void empty_kernel() {
}
void empty_gpu_func() {
hipLaunchKernelGGL(( empty_kernel), dim3(1), dim3(1), 0, 0, );
}
#endif | 1e41a86de23e5637cc3637e290ca94322a8a491e.cu | #if !defined(CPU_ONLY)
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "syncedmem.hpp"
#include "math_functions.hpp"
#define kCutoff 1e-15
__device__ float LogSum_device(float log_a, float log_b) {
return (log_a < log_b) ? log_b + logf(1 + expf(log_a - log_b)) :
log_a + logf(1 + expf(log_b - log_a));
}
__device__ float LogSumVec_device(const float *logvec, size_t size) {
float sum = 0.;
sum = logvec[0];
for (uint i = 1; i < size; ++i) {
sum = LogSum_device(sum, logvec[i]);
}
return sum;
}
__device__ void Softmax_device(float *vec, size_t size) {
// TODO(wdai): Figure out why this is necessary. Doubt it is.
for (uint i = 0; i < size; ++i) {
if (abs(vec[i]) < kCutoff) {
vec[i] = kCutoff;
}
}
float lsum = LogSumVec_device(vec, size);
for (uint i = 0; i < size; ++i) {
vec[i] = expf(vec[i] - lsum);
//(*vec)[i] = exp((*vec)[i] - lsum);
vec[i] = vec[i] > 1 ? 1. : vec[i];
}
}
__global__ void SoftmaxBatchAndAdjust_kernel(
size_t n, size_t size, float *vecs, const uint *labels) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
vec[labels[i]] -= 1.; // See Bishop PRML (2006) Eq. (4.109)
}
}
void SoftmaxBatchAndAdjust_gpu(
cudaStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels) {
SoftmaxBatchAndAdjust_kernel
<<<caffe::CAFFE_GET_BLOCKS(n), caffe::CAFFE_CUDA_NUM_THREADS,
0, cuda_stream>>>
(n, size, vecs, labels);
}
__global__ void SoftmaxBatchAndEntropyLoss_kernel(
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
losses[i] = -logf(vec[labels[i]]);
}
}
void SoftmaxBatchAndEntropyLoss_gpu(
cudaStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
SoftmaxBatchAndEntropyLoss_kernel
<<<caffe::CAFFE_GET_BLOCKS(n), caffe::CAFFE_CUDA_NUM_THREADS,
0, cuda_stream>>>
(n, size, vecs, labels, losses);
}
__device__ float ZeroOneLoss(size_t size, float *vec, uint label) {
uint max_idx = 0;
float max_val = vec[0];
for (uint i = 1; i < size; i++) {
if (vec[i] > max_val) {
max_val = vec[i];
max_idx = i;
}
}
return (max_idx == label) ? 0 : 1;
}
__global__ void SoftmaxBatchAndZeroOneLoss_kernel(
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
CUDA_KERNEL_LOOP(i, n) {
float *vec = &vecs[i * size];
Softmax_device(vec, size);
losses[i] = ZeroOneLoss(size, vec, labels[i]);
}
}
void SoftmaxBatchAndZeroOneLoss_gpu(
cudaStream_t cuda_stream,
size_t n, size_t size, float *vecs, const uint *labels, float *losses) {
SoftmaxBatchAndZeroOneLoss_kernel
<<<caffe::CAFFE_GET_BLOCKS(n), caffe::CAFFE_CUDA_NUM_THREADS,
0, cuda_stream>>>
(n, size, vecs, labels, losses);
}
__global__ void empty_kernel() {
}
void empty_gpu_func() {
empty_kernel<<<1, 1>>>();
}
#endif |
318ba1b7d3013215cf81b28e25d4aeaa6c83127d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "whatDataInCentroid.h"
#include "../randomCudaScripts/DeleteFromArray.h"
#include "../randomCudaScripts/Utils.h"
#include <assert.h>
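// Throughout this file a point is "contained" in a centroid's hypercube when, for every
// dimension flagged true in `dimensions`, |data[centroid][dim] - data[point][dim]| < width;
// unflagged dimensions are ignored. The kernel variants below compute the same predicate with
// different memory-access strategies.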
__global__ void whatDataIsInCentroidKernel(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
const bool dim = dimensions[indexDim];
d &= (not (dim)) || (abs(cen - dat) < width);
}
output[indexData_p] = d;
}
}
// Hack
struct floatArray4{
float f0;
float f1;
float f2;
float f3;
};
struct floatArray8{
float f0;
float f1;
float f2;
float f3;
float f4;
float f5;
float f6;
float f7;
};
// Hack
struct boolArray8{
bool b0;
bool b1;
bool b2;
bool b3;
bool b4;
bool b5;
bool b6;
bool b7;
};
// Hack
struct boolArray4{
bool b0;
bool b1;
bool b2;
bool b3;
};
__global__ void whatDataIsInCentroidChunks(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
floatArray8 pointBuffer;
floatArray8 centroidBuffer;
boolArray8 dimBuffer;
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
unsigned int indexDim = 0;
// Process the data in chunks of 8
for( ; indexDim < (point_dim/8)*8 ; indexDim+=8){
const size_t indexData_f = indexDataNoDim_f + indexDim;
pointBuffer = *((floatArray8*)(data+indexData_f));
centroidBuffer = *((floatArray8*)(data+centroid_f+indexDim));
dimBuffer = *((boolArray8*)(dimensions+indexDim));
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
d &= (not (dimBuffer.b1)) || (abs(centroidBuffer.f1 - pointBuffer.f1) < width);
d &= (not (dimBuffer.b2)) || (abs(centroidBuffer.f2 - pointBuffer.f2) < width);
d &= (not (dimBuffer.b3)) || (abs(centroidBuffer.f3 - pointBuffer.f3) < width);
d &= (not (dimBuffer.b4)) || (abs(centroidBuffer.f4 - pointBuffer.f4) < width);
d &= (not (dimBuffer.b5)) || (abs(centroidBuffer.f5 - pointBuffer.f5) < width);
d &= (not (dimBuffer.b6)) || (abs(centroidBuffer.f6 - pointBuffer.f6) < width);
d &= (not (dimBuffer.b7)) || (abs(centroidBuffer.f7 - pointBuffer.f7) < width);
}
// process remaining in chunks of 4
for(; indexDim < (point_dim/8)*8 +((point_dim%8)/4)*4 ; indexDim+=4){
const size_t indexData_f = indexDataNoDim_f + indexDim;
{
floatArray4 tmp = *((floatArray4*)(data+indexData_f));
pointBuffer.f0 = tmp.f0;
pointBuffer.f1 = tmp.f1;
pointBuffer.f2 = tmp.f2;
pointBuffer.f3 = tmp.f3;
tmp = *((floatArray4*)(data+centroid_f+indexDim));
centroidBuffer.f0 = tmp.f0;
centroidBuffer.f1 = tmp.f1;
centroidBuffer.f2 = tmp.f2;
centroidBuffer.f3 = tmp.f3;
}
{
boolArray4 tmp = *((boolArray4*)(dimensions+indexDim));
dimBuffer.b0 = tmp.b0;
dimBuffer.b1 = tmp.b1;
dimBuffer.b2 = tmp.b2;
dimBuffer.b3 = tmp.b3;
}
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
d &= (not (dimBuffer.b1)) || (abs(centroidBuffer.f1 - pointBuffer.f1) < width);
d &= (not (dimBuffer.b2)) || (abs(centroidBuffer.f2 - pointBuffer.f2) < width);
d &= (not (dimBuffer.b3)) || (abs(centroidBuffer.f3 - pointBuffer.f3) < width);
}
// process the remaining up to 3 points
for(; indexDim < (point_dim/8)*8 +((point_dim%8)/4)*4+(point_dim%8)%4 ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
pointBuffer.f0 = data[indexData_f];
centroidBuffer.f0 = data[centroid_f+indexDim];
dimBuffer.b0 = dimensions[indexDim];
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
}
output[indexData_p] = d;
}
}
__global__ void whatDataIsInCentroidKernelFewPoints(bool* output,
float* data,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
d &= abs(cen - dat) < width;
}
output[indexData_p] = d;
}
}
bool whatDataIsInCentroid(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
hipLaunchKernelGGL(( whatDataIsInCentroidKernel), dim3(dimGrid),dim3(dimBlock),0,stream, output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidChunks(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
hipLaunchKernelGGL(( whatDataIsInCentroidChunks), dim3(dimGrid),dim3(dimBlock),0,stream, output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidFewPoints(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
bool* dimensions,
unsigned int* centroids,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
hipLaunchKernelGGL(( whatDataIsInCentroidKernelFewPoints), dim3(dimGrid),dim3(dimBlock),0,stream, output,
data,
centroids,
no_data_p,
point_dim,
width);
return true;
}
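// gpuWhereThingsGo turns the exclusive prefix sum of the dimension flags into a scatter map:
// a position whose flag is 0 (offset == nextOffset) receives its compacted index idx - offset,
// while flagged positions keep the out-of-range sentinel size+2 so they are skipped later.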
__global__ void gpuWhereThingsGo(unsigned int* d_outData,
const unsigned int* d_data,
const unsigned int size){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx <= size){
d_outData[idx] = size+2;
const unsigned int offset = d_data[idx];
unsigned int nextOffset = offset+1;
if(idx != size){
nextOffset = d_data[idx+1];
}
if(offset == nextOffset){
d_outData[idx] = idx-offset;
}
}
}
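// gpuDimensionChanger compacts each point from `dimensions` coordinates down to
// `dimensionRemaning` coordinates by routing every coordinate through the scatter map above;
// coordinates mapped to the sentinel are dropped.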
__global__ void gpuDimensionChanger(float* d_outData,
const unsigned int* d_wereThingsGoArray,
const float* d_data,
const unsigned int numElements,
const unsigned int dimensions,
const unsigned int dimensionRemaning){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const size_t pointIdex = idx/dimensions;
const size_t dimIndex = idx%dimensions;
const size_t newPointIdex = pointIdex*dimensionRemaning;
const size_t go = d_wereThingsGoArray[dimIndex];
// printf("try writing to %u\n",go);
if(go<dimensions){
const size_t newIndex = newPointIdex+go;
const float theData = d_data[idx];
d_outData[newIndex] = theData;
}
}
}
void whatDataIsInCentroidKernelFewPointsKernel(unsigned int dimGrid,
unsigned int dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dims,
const float width,
const unsigned int point_dim,
const unsigned int no_data){
	// We want a prefix sum of which dimensions are used.
int size_of_out_blelloch = sizeof(unsigned int)*(point_dim+1);
unsigned int* d_out_blelloch;
checkCudaErrors(hipMalloc(&d_out_blelloch, size_of_out_blelloch));
sum_scan_blelloch(stream, d_out_blelloch,dims,point_dim+1, true);
unsigned int* h_out_blelloch;
hipHostMalloc(&h_out_blelloch,4*sizeof(unsigned int));
hipMemcpyAsync(h_out_blelloch, d_out_blelloch+point_dim, sizeof(unsigned int), hipMemcpyDeviceToHost, stream);
hipStreamSynchronize(stream);
const unsigned int dimensionsLeft = point_dim-(h_out_blelloch[0]);
unsigned int* d_out_whereThingsGo;
checkCudaErrors(hipMalloc(&d_out_whereThingsGo, size_of_out_blelloch));
const unsigned int dimBlockWhereThingsGo = dimBlock;
unsigned int dimGridWhereThingsGo = point_dim/dimBlock;
if(point_dim%dimBlock != 0){
dimGridWhereThingsGo++;
}
hipLaunchKernelGGL(( gpuWhereThingsGo), dim3(dimGridWhereThingsGo),dim3(dimBlockWhereThingsGo),0,stream, d_out_whereThingsGo,d_out_blelloch,point_dim);
unsigned int size_of_reducedData = sizeof(float)*dimensionsLeft*no_data;
float* d_reducedData;
checkCudaErrors(hipMalloc(&d_reducedData, size_of_reducedData));
const unsigned int dimBlockgpuDimensionChanger = dimBlock;
unsigned int dimGridgpuDimensionChanger = (no_data*point_dim)/dimBlock;
if((no_data*point_dim)%dimBlock != 0){
dimGridgpuDimensionChanger++;
}
hipLaunchKernelGGL(( gpuDimensionChanger), dim3(dimGridgpuDimensionChanger),dim3(dimBlockgpuDimensionChanger),0,stream, d_reducedData,d_out_whereThingsGo,data,no_data,point_dim,dimensionsLeft);
whatDataIsInCentroidFewPoints(dimGrid,
dimBlock,
stream,
output,
d_reducedData,
dims,
centroids,
width,
dimensionsLeft,
no_data);
};
__global__ void whatDataIsInCentroidLessReading(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const bool dim = dimensions[indexDim];
if(dim){
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
d &= (abs(cen - dat) < width);
}
}
output[indexData_p] = d;
}
}
__global__ void whatDataIsInCentroidLessReadingAndBreaking(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const bool dim = dimensions[indexDim];
if(dim){
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
d &= (abs(cen - dat) < width);
if(!d){
break;
}
}
}
output[indexData_p] = d;
}
}
bool whatDataIsInCentroidLessReadingWrapper(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
hipLaunchKernelGGL(( whatDataIsInCentroidLessReading), dim3(dimGrid),dim3(dimBlock),0,stream, output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidLessReadingAndBreakingWrapper(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
hipLaunchKernelGGL(( whatDataIsInCentroidLessReadingAndBreaking), dim3(dimGrid),dim3(dimBlock),0,stream, output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
std::vector<bool>* whatDataIsInCentroidTester(std::vector<bool>* dims,
std::vector<std::vector<float>*>* data,
unsigned int centroid,
float width,
containedType type){
	// Calculating sizes
unsigned int point_dim = dims->size();
unsigned int no_of_points = data->size();
unsigned int no_of_centroids = 1;
unsigned int floats_in_data = point_dim * no_of_points;
unsigned int bools_in_dims = point_dim;
unsigned int bools_in_output = no_of_points;
unsigned int size_of_data = floats_in_data*sizeof(float);
unsigned int size_of_dims = (bools_in_dims+1)*sizeof(bool);
unsigned int size_of_centroids = no_of_centroids*sizeof(unsigned int);
unsigned int size_of_output = bools_in_output*sizeof(bool);
// allocating on the host
float* data_h = (float*) malloc(size_of_data);
bool* dims_h = (bool*) malloc(size_of_dims);
unsigned int* centroids_h = (unsigned int*) malloc(size_of_centroids);
bool* output_h = (bool*) malloc(size_of_output);
// filling data array
for(int i= 0; i < no_of_points; i++){
for(int j = 0; j < point_dim; j++){
data_h[i*point_dim+j] = data->at(i)->at(j);
}
}
// filling dims array
for(int j = 0; j < point_dim; j++){
dims_h[j] = dims->at(j);
}
// filling centroid array
centroids_h[0] = centroid;
// allocating on device
float* data_d;
bool* dims_d;
unsigned int* centroids_d;
bool* output_d;
hipMalloc((void **) &data_d, size_of_data);
hipMalloc((void **) &dims_d, size_of_dims);
hipMalloc((void **) ¢roids_d, size_of_centroids);
hipMalloc((void **) &output_d, size_of_output);
//Copy from host to device
hipMemcpy(data_d, data_h, size_of_data, hipMemcpyHostToDevice);
hipMemcpy(dims_d, dims_h, size_of_dims, hipMemcpyHostToDevice);
hipMemcpy(centroids_d, centroids_h, size_of_centroids, hipMemcpyHostToDevice);
// Call kernel
if(type == NaiveContained){
hipLaunchKernelGGL(( whatDataIsInCentroidKernel), dim3(ceilf((float)no_of_points/1024)), dim3(1024), 0, 0, output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == ChunksContained){
hipLaunchKernelGGL(( whatDataIsInCentroidChunks), dim3(ceilf((float)no_of_points/1024)), dim3(1024), 0, 0, output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == FewDimsContained){
hipStream_t stream;
hipStreamCreate ( &stream);
whatDataIsInCentroidKernelFewPointsKernel(ceilf((float)no_of_points/1024),
1024,
stream,
output_d,
data_d,
centroids_d,
dims_d,
width,
point_dim,
no_of_points
);
hipStreamSynchronize(stream);
hipStreamDestroy(stream);
}else if(type == LessReadingContained){
hipLaunchKernelGGL(( whatDataIsInCentroidLessReading), dim3(ceilf((float)no_of_points/1024)), dim3(1024), 0, 0, output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == LessReadingBreakContained){
hipLaunchKernelGGL(( whatDataIsInCentroidLessReadingAndBreaking), dim3(ceilf((float)no_of_points/1024)), dim3(1024), 0, 0, output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}
// copy from device
hipMemcpy(output_h, output_d, size_of_output, hipMemcpyDeviceToHost);
	// construct output
auto output = new std::vector<bool>;
for(int j = 0; j < no_of_points; j++){
output->push_back(output_h[j]);
}
hipFree(data_d);
hipFree(dims_d);
hipFree(centroids_d);
hipFree(output_d);
free(data_h);
free(dims_h);
free(centroids_h);
free(output_h);
return output;
};
bool whatDataIsInCentroidWrapper(size_t dimGrid,
size_t dimBlock,
hipStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p,
containedType type){
if(type == NaiveContained){
whatDataIsInCentroid(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == ChunksContained){
whatDataIsInCentroidChunks(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == FewDimsContained){
whatDataIsInCentroidKernelFewPointsKernel(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == LessReadingContained){
whatDataIsInCentroidLessReadingWrapper(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == LessReadingBreakContained){
whatDataIsInCentroidLessReadingAndBreakingWrapper(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}
} | 318ba1b7d3013215cf81b28e25d4aeaa6c83127d.cu | #include "whatDataInCentroid.h"
#include "../randomCudaScripts/DeleteFromArray.h"
#include "../randomCudaScripts/Utils.h"
#include <assert.h>
__global__ void whatDataIsInCentroidKernel(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
const bool dim = dimensions[indexDim];
d &= (not (dim)) || (abs(cen - dat) < width);
}
output[indexData_p] = d;
}
}
// Hack
struct floatArray4{
float f0;
float f1;
float f2;
float f3;
};
struct floatArray8{
float f0;
float f1;
float f2;
float f3;
float f4;
float f5;
float f6;
float f7;
};
// Hack
struct boolArray8{
bool b0;
bool b1;
bool b2;
bool b3;
bool b4;
bool b5;
bool b6;
bool b7;
};
// Hack
struct boolArray4{
bool b0;
bool b1;
bool b2;
bool b3;
};
__global__ void whatDataIsInCentroidChunks(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
floatArray8 pointBuffer;
floatArray8 centroidBuffer;
boolArray8 dimBuffer;
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
unsigned int indexDim = 0;
// Process the data in chunks of 8
for( ; indexDim < (point_dim/8)*8 ; indexDim+=8){
const size_t indexData_f = indexDataNoDim_f + indexDim;
pointBuffer = *((floatArray8*)(data+indexData_f));
centroidBuffer = *((floatArray8*)(data+centroid_f+indexDim));
dimBuffer = *((boolArray8*)(dimensions+indexDim));
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
d &= (not (dimBuffer.b1)) || (abs(centroidBuffer.f1 - pointBuffer.f1) < width);
d &= (not (dimBuffer.b2)) || (abs(centroidBuffer.f2 - pointBuffer.f2) < width);
d &= (not (dimBuffer.b3)) || (abs(centroidBuffer.f3 - pointBuffer.f3) < width);
d &= (not (dimBuffer.b4)) || (abs(centroidBuffer.f4 - pointBuffer.f4) < width);
d &= (not (dimBuffer.b5)) || (abs(centroidBuffer.f5 - pointBuffer.f5) < width);
d &= (not (dimBuffer.b6)) || (abs(centroidBuffer.f6 - pointBuffer.f6) < width);
d &= (not (dimBuffer.b7)) || (abs(centroidBuffer.f7 - pointBuffer.f7) < width);
}
// process remaining in chunks of 4
for(; indexDim < (point_dim/8)*8 +((point_dim%8)/4)*4 ; indexDim+=4){
const size_t indexData_f = indexDataNoDim_f + indexDim;
{
floatArray4 tmp = *((floatArray4*)(data+indexData_f));
pointBuffer.f0 = tmp.f0;
pointBuffer.f1 = tmp.f1;
pointBuffer.f2 = tmp.f2;
pointBuffer.f3 = tmp.f3;
tmp = *((floatArray4*)(data+centroid_f+indexDim));
centroidBuffer.f0 = tmp.f0;
centroidBuffer.f1 = tmp.f1;
centroidBuffer.f2 = tmp.f2;
centroidBuffer.f3 = tmp.f3;
}
{
boolArray4 tmp = *((boolArray4*)(dimensions+indexDim));
dimBuffer.b0 = tmp.b0;
dimBuffer.b1 = tmp.b1;
dimBuffer.b2 = tmp.b2;
dimBuffer.b3 = tmp.b3;
}
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
d &= (not (dimBuffer.b1)) || (abs(centroidBuffer.f1 - pointBuffer.f1) < width);
d &= (not (dimBuffer.b2)) || (abs(centroidBuffer.f2 - pointBuffer.f2) < width);
d &= (not (dimBuffer.b3)) || (abs(centroidBuffer.f3 - pointBuffer.f3) < width);
}
// process the remaining up to 3 points
for(; indexDim < (point_dim/8)*8 +((point_dim%8)/4)*4+(point_dim%8)%4 ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
pointBuffer.f0 = data[indexData_f];
centroidBuffer.f0 = data[centroid_f+indexDim];
dimBuffer.b0 = dimensions[indexDim];
d &= (not (dimBuffer.b0)) || (abs(centroidBuffer.f0 - pointBuffer.f0) < width);
}
output[indexData_p] = d;
}
}
__global__ void whatDataIsInCentroidKernelFewPoints(bool* output,
float* data,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
d &= abs(cen - dat) < width;
}
output[indexData_p] = d;
}
}
bool whatDataIsInCentroid(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
whatDataIsInCentroidKernel<<<dimGrid,dimBlock,0,stream>>>(output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidChunks(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
whatDataIsInCentroidChunks<<<dimGrid,dimBlock,0,stream>>>(output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidFewPoints(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
bool* dimensions,
unsigned int* centroids,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
whatDataIsInCentroidKernelFewPoints<<<dimGrid,dimBlock,0,stream>>>(output,
data,
centroids,
no_data_p,
point_dim,
width);
return true;
}
__global__ void gpuWhereThingsGo(unsigned int* d_outData,
const unsigned int* d_data,
const unsigned int size){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx <= size){
d_outData[idx] = size+2;
const unsigned int offset = d_data[idx];
unsigned int nextOffset = offset+1;
if(idx != size){
nextOffset = d_data[idx+1];
}
if(offset == nextOffset){
d_outData[idx] = idx-offset;
}
}
}
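// gpuWhereThingsGo: d_data holds a prefix-sum (scan) of the dimension mask with
// size+1 entries. Wherever the scan does not increase between idx and idx+1 the
// kernel stores that dimension's destination slot (idx - offset) in the compacted
// layout; all other entries keep the out-of-range sentinel size+2, which makes
// gpuDimensionChanger below skip them.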
__global__ void gpuDimensionChanger(float* d_outData,
const unsigned int* d_wereThingsGoArray,
const float* d_data,
const unsigned int numElements,
const unsigned int dimensions,
const unsigned int dimensionRemaning){
const size_t idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < numElements*dimensions){
const size_t pointIdex = idx/dimensions;
const size_t dimIndex = idx%dimensions;
const size_t newPointIdex = pointIdex*dimensionRemaning;
const size_t go = d_wereThingsGoArray[dimIndex];
// printf("try writing to %u\n",go);
if(go<dimensions){
const size_t newIndex = newPointIdex+go;
const float theData = d_data[idx];
d_outData[newIndex] = theData;
}
}
}
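// gpuDimensionChanger scatters each point's kept coordinates into a tighter
// layout with dimensionRemaning values per point, using the per-dimension
// destinations produced by gpuWhereThingsGo. The function below chains the scan
// and the two kernels, then runs the dense few-dimensions containment kernel on
// the reduced data; note that the temporary device buffers it allocates are not
// freed inside this function.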
void whatDataIsInCentroidKernelFewPointsKernel(unsigned int dimGrid,
unsigned int dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dims,
const float width,
const unsigned int point_dim,
const unsigned int no_data){
	// I want a prefix sum of which dimensions are used.
int size_of_out_blelloch = sizeof(unsigned int)*(point_dim+1);
unsigned int* d_out_blelloch;
checkCudaErrors(cudaMalloc(&d_out_blelloch, size_of_out_blelloch));
sum_scan_blelloch(stream, d_out_blelloch,dims,point_dim+1, true);
unsigned int* h_out_blelloch;
cudaMallocHost(&h_out_blelloch,4*sizeof(unsigned int));
cudaMemcpyAsync(h_out_blelloch, d_out_blelloch+point_dim, sizeof(unsigned int), cudaMemcpyDeviceToHost, stream);
cudaStreamSynchronize(stream);
const unsigned int dimensionsLeft = point_dim-(h_out_blelloch[0]);
unsigned int* d_out_whereThingsGo;
checkCudaErrors(cudaMalloc(&d_out_whereThingsGo, size_of_out_blelloch));
const unsigned int dimBlockWhereThingsGo = dimBlock;
unsigned int dimGridWhereThingsGo = point_dim/dimBlock;
if(point_dim%dimBlock != 0){
dimGridWhereThingsGo++;
}
gpuWhereThingsGo<<<dimGridWhereThingsGo,dimBlockWhereThingsGo,0,stream>>>(d_out_whereThingsGo,d_out_blelloch,point_dim);
unsigned int size_of_reducedData = sizeof(float)*dimensionsLeft*no_data;
float* d_reducedData;
checkCudaErrors(cudaMalloc(&d_reducedData, size_of_reducedData));
const unsigned int dimBlockgpuDimensionChanger = dimBlock;
unsigned int dimGridgpuDimensionChanger = (no_data*point_dim)/dimBlock;
if((no_data*point_dim)%dimBlock != 0){
dimGridgpuDimensionChanger++;
}
gpuDimensionChanger<<<dimGridgpuDimensionChanger,dimBlockgpuDimensionChanger,0,stream>>>(d_reducedData,d_out_whereThingsGo,data,no_data,point_dim,dimensionsLeft);
whatDataIsInCentroidFewPoints(dimGrid,
dimBlock,
stream,
output,
d_reducedData,
dims,
centroids,
width,
dimensionsLeft,
no_data);
};
__global__ void whatDataIsInCentroidLessReading(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const bool dim = dimensions[indexDim];
if(dim){
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
d &= (abs(cen - dat) < width);
}
}
output[indexData_p] = d;
}
}
__global__ void whatDataIsInCentroidLessReadingAndBreaking(bool* output,
float* data,
bool* dimensions,
const unsigned int* centroid,
const unsigned int no_data_p,
const unsigned int point_dim,
const float width){
unsigned int indexData_p = threadIdx.x + blockIdx.x*blockDim.x;
if(indexData_p < no_data_p){
const size_t indexDataNoDim_f = indexData_p*point_dim;
const size_t centroid_f = centroid[0]*point_dim;
bool d = true;
for(unsigned int indexDim = 0 ; indexDim < point_dim ; indexDim++){
const size_t indexData_f = indexDataNoDim_f + indexDim;
const bool dim = dimensions[indexDim];
if(dim){
const float dat = data[indexData_f];
const float cen = data[centroid_f+indexDim];
//printf("%u : c: %f p: %f \n", indexData_p, cen, dat);
d &= (abs(cen - dat) < width);
if(!d){
break;
}
}
}
output[indexData_p] = d;
}
}
bool whatDataIsInCentroidLessReadingWrapper(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
whatDataIsInCentroidLessReading<<<dimGrid,dimBlock,0,stream>>>(output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
bool whatDataIsInCentroidLessReadingAndBreakingWrapper(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p){
whatDataIsInCentroidLessReadingAndBreaking<<<dimGrid,dimBlock,0,stream>>>(output,
data,
dimensions,
centroids,
no_data_p,
point_dim,
width);
return true;
}
std::vector<bool>* whatDataIsInCentroidTester(std::vector<bool>* dims,
std::vector<std::vector<float>*>* data,
unsigned int centroid,
float width,
containedType type){
	// Calculating sizes
unsigned int point_dim = dims->size();
unsigned int no_of_points = data->size();
unsigned int no_of_centroids = 1;
unsigned int floats_in_data = point_dim * no_of_points;
unsigned int bools_in_dims = point_dim;
unsigned int bools_in_output = no_of_points;
unsigned int size_of_data = floats_in_data*sizeof(float);
unsigned int size_of_dims = (bools_in_dims+1)*sizeof(bool);
unsigned int size_of_centroids = no_of_centroids*sizeof(unsigned int);
unsigned int size_of_output = bools_in_output*sizeof(bool);
// allocating on the host
float* data_h = (float*) malloc(size_of_data);
bool* dims_h = (bool*) malloc(size_of_dims);
unsigned int* centroids_h = (unsigned int*) malloc(size_of_centroids);
bool* output_h = (bool*) malloc(size_of_output);
// filling data array
for(int i= 0; i < no_of_points; i++){
for(int j = 0; j < point_dim; j++){
data_h[i*point_dim+j] = data->at(i)->at(j);
}
}
// filling dims array
for(int j = 0; j < point_dim; j++){
dims_h[j] = dims->at(j);
}
// filling centroid array
centroids_h[0] = centroid;
// allocating on device
float* data_d;
bool* dims_d;
unsigned int* centroids_d;
bool* output_d;
cudaMalloc((void **) &data_d, size_of_data);
cudaMalloc((void **) &dims_d, size_of_dims);
	cudaMalloc((void **) &centroids_d, size_of_centroids);
cudaMalloc((void **) &output_d, size_of_output);
//Copy from host to device
cudaMemcpy(data_d, data_h, size_of_data, cudaMemcpyHostToDevice);
cudaMemcpy(dims_d, dims_h, size_of_dims, cudaMemcpyHostToDevice);
cudaMemcpy(centroids_d, centroids_h, size_of_centroids, cudaMemcpyHostToDevice);
// Call kernel
if(type == NaiveContained){
whatDataIsInCentroidKernel<<<ceilf((float)no_of_points/1024), 1024>>>(output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == ChunksContained){
whatDataIsInCentroidChunks<<<ceilf((float)no_of_points/1024), 1024>>>(output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == FewDimsContained){
cudaStream_t stream;
cudaStreamCreate ( &stream);
whatDataIsInCentroidKernelFewPointsKernel(ceilf((float)no_of_points/1024),
1024,
stream,
output_d,
data_d,
centroids_d,
dims_d,
width,
point_dim,
no_of_points
);
cudaStreamSynchronize(stream);
cudaStreamDestroy(stream);
}else if(type == LessReadingContained){
whatDataIsInCentroidLessReading<<<ceilf((float)no_of_points/1024), 1024>>>(output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}else if(type == LessReadingBreakContained){
whatDataIsInCentroidLessReadingAndBreaking<<<ceilf((float)no_of_points/1024), 1024>>>(output_d, data_d, dims_d, centroids_d,
no_of_points, point_dim, width);
}
// copy from device
cudaMemcpy(output_h, output_d, size_of_output, cudaMemcpyDeviceToHost);
	// construct output
auto output = new std::vector<bool>;
for(int j = 0; j < no_of_points; j++){
output->push_back(output_h[j]);
}
cudaFree(data_d);
cudaFree(dims_d);
cudaFree(centroids_d);
cudaFree(output_d);
free(data_h);
free(dims_h);
free(centroids_h);
free(output_h);
return output;
};
bool whatDataIsInCentroidWrapper(size_t dimGrid,
size_t dimBlock,
cudaStream_t stream,
bool* output,
float* data,
unsigned int* centroids,
bool* dimensions,
const float width,
const unsigned int point_dim,
const unsigned int no_data_p,
containedType type){
if(type == NaiveContained){
whatDataIsInCentroid(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == ChunksContained){
whatDataIsInCentroidChunks(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == FewDimsContained){
whatDataIsInCentroidKernelFewPointsKernel(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == LessReadingContained){
whatDataIsInCentroidLessReadingWrapper(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
}else if(type == LessReadingBreakContained){
whatDataIsInCentroidLessReadingAndBreakingWrapper(dimGrid,
dimBlock,
stream,
output,
data,
centroids,
dimensions,
width,
point_dim,
no_data_p);
	}
	return true;
} |
bd225db33c3cb77bc731e9accb303b3051923c86.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <ConstantHelper.h>
#include <DataTypeUtils.h>
#include <execution/LaunchContext.h>
#include <specials.h>
#include <logger.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace nd4j {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("hipGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
int dev = 0;
auto res = hipGetDevice(&dev);
if (res != 0)
throw cuda_exception::build("hipGetDevice failed", res);
return dev;
}
int ConstantHelper::getNumberOfDevices() {
int dev = 0;
auto res = hipGetDeviceCount(&dev);
if (res != 0)
throw cuda_exception::build("hipGetDeviceCount failed", res);
return dev;
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = hipSetDevice(e);
if (res != 0)
throw cuda_exception::build("hipSetDevice failed", res);
auto constant = getConstantSpace();
std::map<ConstantDescriptor, ConstantHolder> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = hipSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final hipSetDevice failed", res);
}
ConstantHelper* ConstantHelper::getInstance() {
if (!_INSTANCE)
_INSTANCE = new nd4j::ConstantHelper();
return _INSTANCE;
}
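    // replicatePointer copies numBytes of host data to the device. While the 48KB
    // __constant__ bank (deviceConstantMemory) still has room, it bump-allocates
    // from it (rounding each reservation up to a multiple of 8 bytes) and returns a
    // pointer into constant memory; once the bank would overflow it falls back to a
    // regular device allocation obtained through ALLOCATE_SPECIAL.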
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
_mutex.lock();
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = hipMemcpy(ptr, src, numBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpy failed", res);
_mutex.unlock();
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = hipMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpyToSymbol failed", res);
_mutex.unlock();
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, nd4j::DataType dataType) {
const auto deviceId = getCurrentDevice();
if (_cache[deviceId].count(descriptor) == 0) {
ConstantHolder holder;
_cache[deviceId][descriptor] = holder;
}
ConstantHolder* holder = &_cache[deviceId][descriptor];
if (holder->hasBuffer(dataType)) {
return holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = new int8_t[numBytes];
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::DOUBLE, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff), (nd4j::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::INT64, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff), (nd4j::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
auto dbuff = replicatePointer(cbuff, descriptor.length() * DataTypeUtils::sizeOf(dataType));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), DataTypeUtils::sizeOf(dataType));
holder->addBuffer(dataBuffer, dataType);
return holder->getConstantDataBuffer(dataType);
}
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
nd4j::ConstantHelper* nd4j::ConstantHelper::_INSTANCE = 0;
} | bd225db33c3cb77bc731e9accb303b3051923c86.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <ConstantHelper.h>
#include <DataTypeUtils.h>
#include <execution/LaunchContext.h>
#include <specials.h>
#include <logger.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace nd4j {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("cudaGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
int dev = 0;
auto res = cudaGetDevice(&dev);
if (res != 0)
throw cuda_exception::build("cudaGetDevice failed", res);
return dev;
}
int ConstantHelper::getNumberOfDevices() {
int dev = 0;
auto res = cudaGetDeviceCount(&dev);
if (res != 0)
throw cuda_exception::build("cudaGetDeviceCount failed", res);
return dev;
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = cudaSetDevice(e);
if (res != 0)
throw cuda_exception::build("cudaSetDevice failed", res);
auto constant = getConstantSpace();
std::map<ConstantDescriptor, ConstantHolder> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = cudaSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final cudaSetDevice failed", res);
}
ConstantHelper* ConstantHelper::getInstance() {
if (!_INSTANCE)
_INSTANCE = new nd4j::ConstantHelper();
return _INSTANCE;
}
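    // replicatePointer copies numBytes of host data to the device. While the 48KB
    // __constant__ bank (deviceConstantMemory) still has room, it bump-allocates
    // from it (rounding each reservation up to a multiple of 8 bytes) and returns a
    // pointer into constant memory; once the bank would overflow it falls back to a
    // regular device allocation obtained through ALLOCATE_SPECIAL.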
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
_mutex.lock();
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = cudaMemcpy(ptr, src, numBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpy failed", res);
_mutex.unlock();
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = cudaMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpyToSymbol failed", res);
_mutex.unlock();
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, nd4j::DataType dataType) {
const auto deviceId = getCurrentDevice();
if (_cache[deviceId].count(descriptor) == 0) {
ConstantHolder holder;
_cache[deviceId][descriptor] = holder;
}
ConstantHolder* holder = &_cache[deviceId][descriptor];
if (holder->hasBuffer(dataType)) {
return holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = new int8_t[numBytes];
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::DOUBLE, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff), (nd4j::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(nd4j::DataType::INT64, dataType, nd4j::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff), (nd4j::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
auto dbuff = replicatePointer(cbuff, descriptor.length() * DataTypeUtils::sizeOf(dataType));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), DataTypeUtils::sizeOf(dataType));
holder->addBuffer(dataBuffer, dataType);
return holder->getConstantDataBuffer(dataType);
}
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
nd4j::ConstantHelper* nd4j::ConstantHelper::_INSTANCE = 0;
} |
Fidelity.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cpp/dev_random.cpp>
#include <tclap/CmdLine.h>
#include <itpp/itbase.h>
#include <itpp/stat/histogram.h>
#include "cpp/RMT.cpp"
#include <cpp/itpp_ext_math.cpp>
#include <cpp/spinchain.cpp>
#include <itpp/stat/misc_stat.h>
#include <fstream>
#include <hip/hip_runtime.h>
#include "hip_functions.hip"
#include "hip_utils.hip"
#include "ev_routines.cu"
#include "cfp_routines.cu"
//using namespace std;
//using namespace itpp;
//using namespace itppextmath;
//using namespace cfpmath;
//using namespace spinchain;
TCLAP::CmdLine cmd("Command description message", ' ', "0.1");
TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"normalito", "string",cmd);
TCLAP::ValueArg<string> optionArg2("","option2", "Option2" ,false,"fidelity", "string",cmd);
TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd);
TCLAP::ValueArg<int> qubits("q","qubits", "number of qubits",false, 4,"int",cmd);
TCLAP::ValueArg<double> J("J","ising_coupling", "Ising interaction in the z-direction",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 1.4,"double",cmd);
TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0.,"double",cmd);
TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 1.4,"double",cmd);
TCLAP::ValueArg<double> theta("","theta", "polar angle",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> phi("","phi", "azimultal angle",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> deltabx("","deltabx", "perturbation",false, 0.1,"double",cmd);
TCLAP::ValueArg<int> steps("","steps","steps",false, 100,"int",cmd);
TCLAP::ValueArg<double> Jpert("","Jpert","Perturbation on Ising",false, 0.0,"double",cmd);
TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for c20, 1 para la jodida",false, 0,"int",cmd);
int main(int argc, char* argv[])
{
cmd.parse( argc, argv );
cout.precision(17);
hipSetDevice(dev.getValue());
// {{{ Set seed for random
unsigned int semilla=seed.getValue();
if (semilla == 0){
Random semilla_uran; semilla=semilla_uran.strong();
}
itpp::RNG_reset(semilla);
// }}}
itpp::vec b(3), bpert(3);
b(0)=bx.getValue();
b(1)=by.getValue();
b(2)=bz.getValue();
bpert=b;
bpert(0)=b(0)+deltabx.getValue();
string option=optionArg.getValue();
string option2=optionArg2.getValue();
itpp::cvec state, staterev, qustate;
//ofstream fidelity;
//fidelity.open("fidelity.dat");
//qustate=RandomState(64);
//int dim=pow_2(qubits.getValue());
qustate=itppextmath::BlochToQubit(theta.getValue(),phi.getValue());
//qustate=RandomState(2);
//for(int i=0; i<qubits.getValue()+1;i++){
//list(i)=qustate;
//}
if(option=="normalito")
state=itppextmath::TensorPow(qustate,qubits.getValue());
if(option=="randU")
state=RMT::RandomCUE(pow(2, qubits.getValue()))*itppextmath::TensorPow(qustate,qubits.getValue());
if(option=="klimov")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(1)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4));
if(option=="klimovy")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(2)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4));
if(option=="klimov2")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,2),itppextmath::TensorPow(itppextmath::sigma(1)*qustate,2)),itppextmath::TensorPow(qustate,qubits.getValue()-4));
//cout<< qustate ;
staterev=state;
double Jrev=J.getValue()+Jpert.getValue();
if(option2=="fidelity"){
itpp::vec list(steps.getValue());
for(int i=0;i<steps.getValue();i++){
list(i)=pow( abs( dot( conj(staterev),state)),2);
//cout<< pow( abs( dot( conj(staterev),state)),2) <<endl;
std::cout << list(i) <<endl;
// cout<< i<< " " << list(i) <<endl;
list(i)=sqrt(list(i));
itppcuda::apply_floquet(state, J.getValue(), b);
itppcuda::apply_floquet(staterev, Jrev, bpert);
//cout<<abs(dot(conj(staterev),state))<<endl;
//fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl;
}
//fidelity.close();
//cout << staterev;
std::cout<< itppextmath::sum_positive_derivatives(list)<< endl;
}
if(option2=="correlacion"){
itpp::cvec list(steps.getValue());
itpp::cvec init=state;
for(int i=0;i<steps.getValue();i++){
list(i)=dot(conj(init),state);
std::cout << real(list(i)) << " " << imag(list(i)) <<endl;
//cout << list <<endl;
itppcuda::apply_floquet(state, J.getValue(), b);
}
}
if(option2=="fidelityandipr"){
itpp::vec listfidel(steps.getValue());
itpp::cvec listcorr(steps.getValue());
itpp::cvec init=state;
for(int i=0;i<steps.getValue();i++){
listfidel(i)=pow( abs( dot( conj(staterev),state)),2);
listcorr(i)=pow(abs(dot(conj(init),state)),2);
//cout<< pow( abs( dot( conj(staterev),state)),2) <<endl;
std::cout << listfidel(i) <<endl;
// cout<< i<< " " << list(i) <<endl;
listfidel(i)=sqrt(listfidel(i));
itppcuda::apply_floquet(state, J.getValue(), b);
itppcuda::apply_floquet(staterev, Jrev, bpert);
//cout<<abs(dot(conj(staterev),state))<<endl;
//fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl;
}
//fidelity.close();
//cout << staterev;
cout<< itppextmath::sum_positive_derivatives(listfidel)<< endl;
cout<< real(mean(listcorr))<< endl;
}
}
| Fidelity.cu | #include <iostream>
#include <cpp/dev_random.cpp>
#include <tclap/CmdLine.h>
#include <itpp/itbase.h>
#include <itpp/stat/histogram.h>
#include "cpp/RMT.cpp"
#include <cpp/itpp_ext_math.cpp>
#include <cpp/spinchain.cpp>
#include <itpp/stat/misc_stat.h>
#include <fstream>
#include <cuda.h>
#include "cuda_functions.cu"
#include "cuda_utils.cu"
#include "ev_routines.cu"
#include "cfp_routines.cu"
//using namespace std;
//using namespace itpp;
//using namespace itppextmath;
//using namespace cfpmath;
//using namespace spinchain;
TCLAP::CmdLine cmd("Command description message", ' ', "0.1");
TCLAP::ValueArg<string> optionArg("o","option", "Option" ,false,"normalito", "string",cmd);
TCLAP::ValueArg<string> optionArg2("","option2", "Option2" ,false,"fidelity", "string",cmd);
TCLAP::ValueArg<unsigned int> seed("s","seed", "Random seed [0 for urandom]",false, 243243,"unsigned int",cmd);
TCLAP::ValueArg<int> qubits("q","qubits", "number of qubits",false, 4,"int",cmd);
TCLAP::ValueArg<double> J("J","ising_coupling", "Ising interaction in the z-direction",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> bx("","bx", "Magnetic field in x direction",false, 1.4,"double",cmd);
TCLAP::ValueArg<double> by("","by", "Magnetic field in y direction",false, 0.,"double",cmd);
TCLAP::ValueArg<double> bz("","bz", "Magnetic field in z direction",false, 1.4,"double",cmd);
TCLAP::ValueArg<double> theta("","theta", "polar angle",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> phi("","phi", "azimultal angle",false, 1.0,"double",cmd);
TCLAP::ValueArg<double> deltabx("","deltabx", "perturbation",false, 0.1,"double",cmd);
TCLAP::ValueArg<int> steps("","steps","steps",false, 100,"int",cmd);
TCLAP::ValueArg<double> Jpert("","Jpert","Perturbation on Ising",false, 0.0,"double",cmd);
TCLAP::ValueArg<int> dev("","dev", "Gpu to be used, 0 for c20, 1 para la jodida",false, 0,"int",cmd);
int main(int argc, char* argv[])
{
cmd.parse( argc, argv );
cout.precision(17);
cudaSetDevice(dev.getValue());
// {{{ Set seed for random
unsigned int semilla=seed.getValue();
if (semilla == 0){
Random semilla_uran; semilla=semilla_uran.strong();
}
itpp::RNG_reset(semilla);
// }}}
itpp::vec b(3), bpert(3);
b(0)=bx.getValue();
b(1)=by.getValue();
b(2)=bz.getValue();
bpert=b;
bpert(0)=b(0)+deltabx.getValue();
string option=optionArg.getValue();
string option2=optionArg2.getValue();
itpp::cvec state, staterev, qustate;
//ofstream fidelity;
//fidelity.open("fidelity.dat");
//qustate=RandomState(64);
//int dim=pow_2(qubits.getValue());
qustate=itppextmath::BlochToQubit(theta.getValue(),phi.getValue());
//qustate=RandomState(2);
//for(int i=0; i<qubits.getValue()+1;i++){
//list(i)=qustate;
//}
if(option=="normalito")
state=itppextmath::TensorPow(qustate,qubits.getValue());
if(option=="randU")
state=RMT::RandomCUE(pow(2, qubits.getValue()))*itppextmath::TensorPow(qustate,qubits.getValue());
if(option=="klimov")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(1)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4));
if(option=="klimovy")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,3),itppextmath::sigma(2)*qustate),itppextmath::TensorPow(qustate,qubits.getValue()-4));
if(option=="klimov2")
state=itppextmath::TensorProduct(itppextmath::TensorProduct(itppextmath::TensorPow(qustate,2),itppextmath::TensorPow(itppextmath::sigma(1)*qustate,2)),itppextmath::TensorPow(qustate,qubits.getValue()-4));
//cout<< qustate ;
staterev=state;
double Jrev=J.getValue()+Jpert.getValue();
if(option2=="fidelity"){
itpp::vec list(steps.getValue());
for(int i=0;i<steps.getValue();i++){
list(i)=pow( abs( dot( conj(staterev),state)),2);
//cout<< pow( abs( dot( conj(staterev),state)),2) <<endl;
std::cout << list(i) <<endl;
// cout<< i<< " " << list(i) <<endl;
list(i)=sqrt(list(i));
itppcuda::apply_floquet(state, J.getValue(), b);
itppcuda::apply_floquet(staterev, Jrev, bpert);
//cout<<abs(dot(conj(staterev),state))<<endl;
//fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl;
}
//fidelity.close();
//cout << staterev;
std::cout<< itppextmath::sum_positive_derivatives(list)<< endl;
}
if(option2=="correlacion"){
itpp::cvec list(steps.getValue());
itpp::cvec init=state;
for(int i=0;i<steps.getValue();i++){
list(i)=dot(conj(init),state);
std::cout << real(list(i)) << " " << imag(list(i)) <<endl;
//cout << list <<endl;
itppcuda::apply_floquet(state, J.getValue(), b);
}
}
if(option2=="fidelityandipr"){
itpp::vec listfidel(steps.getValue());
itpp::cvec listcorr(steps.getValue());
itpp::cvec init=state;
for(int i=0;i<steps.getValue();i++){
listfidel(i)=pow( abs( dot( conj(staterev),state)),2);
listcorr(i)=pow(abs(dot(conj(init),state)),2);
//cout<< pow( abs( dot( conj(staterev),state)),2) <<endl;
std::cout << listfidel(i) <<endl;
// cout<< i<< " " << list(i) <<endl;
listfidel(i)=sqrt(listfidel(i));
itppcuda::apply_floquet(state, J.getValue(), b);
itppcuda::apply_floquet(staterev, Jrev, bpert);
//cout<<abs(dot(conj(staterev),state))<<endl;
//fidelity<<pow(abs(dot(conj(staterev),state)),2)<<endl;
}
//fidelity.close();
//cout << staterev;
cout<< itppextmath::sum_positive_derivatives(listfidel)<< endl;
cout<< real(mean(listcorr))<< endl;
}
}
|
ea9616e12cdb9ae2e595a73503730cab6b11ef8e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/device_info.cuh>
#include <nvbench/cuda_call.cuh>
#include <nvbench/detail/device_scope.cuh>
#include <hip/hip_runtime_api.h>
namespace nvbench
{
device_info::memory_info device_info::get_global_memory_usage() const
{
nvbench::detail::device_scope _{m_id};
memory_info result{};
NVBENCH_CUDA_CALL(hipMemGetInfo(&result.bytes_free, &result.bytes_total));
return result;
}
device_info::device_info(int id)
: m_id{id}
, m_prop{}
{
NVBENCH_CUDA_CALL(hipGetDeviceProperties(&m_prop, m_id));
}
} // namespace nvbench
| ea9616e12cdb9ae2e595a73503730cab6b11ef8e.cu | /*
* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 with the LLVM exception
* (the "License"); you may not use this file except in compliance with
* the License.
*
* You may obtain a copy of the License at
*
* http://llvm.org/foundation/relicensing/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <nvbench/device_info.cuh>
#include <nvbench/cuda_call.cuh>
#include <nvbench/detail/device_scope.cuh>
#include <cuda_runtime_api.h>
namespace nvbench
{
device_info::memory_info device_info::get_global_memory_usage() const
{
nvbench::detail::device_scope _{m_id};
memory_info result{};
NVBENCH_CUDA_CALL(cudaMemGetInfo(&result.bytes_free, &result.bytes_total));
return result;
}
device_info::device_info(int id)
: m_id{id}
, m_prop{}
{
NVBENCH_CUDA_CALL(cudaGetDeviceProperties(&m_prop, m_id));
}
} // namespace nvbench
|
d63457a3b509ae92127531d4656efe6666c746e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudamat_kernels.cuh"
#include "float.h"
template<int NUM_THREADS>
__device__ void reduceToMax(float* sdata, unsigned int tid){
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMax32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]);
}
}
template __device__ void reduceToMax<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
template<int NUM_THREADS>
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = mySum + smem[tid + 16];
smem[tid] = mySum = mySum + smem[tid + 8];
smem[tid] = mySum = mySum + smem[tid + 4];
smem[tid] = mySum = mySum + smem[tid + 2];
smem[tid] = mySum = mySum + smem[tid + 1];
}
}
/*
* tanh is predefined in CUDA.
__device__ inline float tanh(float x) {
return (1.0f - __expf(-x)) / (1.0f + __expf(-x));
}
*/
__device__ inline float relu(float x) {
return ((x > 0) ? x : 0);
}
__device__ inline float deriv_of_relu(float y) {
return ((y > 0) ? 1 : 0);
}
__device__ inline float sigmoid(float x) {
return 1.0f / (1.0f + __expf(-x));
}
__device__ inline float deriv_of_sigmoid(float y) {
return y * (1 - y);
}
__device__ inline float deriv_of_tanh(float y) {
return 1 - y*y;
}
template __device__ void reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
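// Each of the NUM_RND_STREAMS streams is an independent multiply-with-carry
// generator: the low 32 bits of rndWord hold the current value x and the high
// 32 bits the carry c, and one step computes rndMult * x + c (assuming the
// LOW_BITS/HIGH_BITS macros extract those halves). The kernels below map the
// low word to a uniform float in (0, 1] by scaling with 1/2^32.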
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
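// kRandomGaussian draws two uniforms per iteration and applies the Box-Muller
// transform, R = sqrtf(-2 ln u1) and T = 2*pi*u2, writing the independent
// standard-normal pair R*cos(T) and R*sin(T); the second sample is stored only
// while it still falls inside the output array.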
__global__ void kRandomGaussianDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] *= 1 + scale * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] *= 1 + scale * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob) ? (scale * gData[i]) : val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
__global__ void kTransposeBig(float *odata, float *idata, int height, int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int r, c;
for (unsigned int i = idx; i < width * height; i += numThreads) {
r = i % width;
c = i / width;
odata[i] = idata[height * r + c];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i];
}
__global__ void kLessThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] <= mat2[i];
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val;
}
__global__ void kLessThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] <= val;
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i];
}
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val;
}
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val;
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val:mat[i];
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val ? val:mat[i];
}
__global__ void kUpperBoundModScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val : (mat[i] < -val ? -val : mat[i]);
}
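/* kSparseDot: multiplies an m x k CSR sparse matrix (data/indptr/indices) by a
 * dense matrix and writes target = alpha * product + beta * target. One thread
 * is mapped to each (row, col) output element; dense_data is read as a
 * column-major k x n matrix and target as a column-major m x n matrix. */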
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] ? copysignf(1., mat[i]) : 0;
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __cosf(mat[i]);
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sigmoid(mat[i]);
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
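/* kApplyLog1PlusExp: numerically stable softplus, log(1 + exp(x)). For x > 0 it
 * is evaluated as x + log(1 + exp(-x)) to avoid overflowing exp. */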
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __logf(mat[i] + tiny);
}
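/* kSquashRelu: despite the name, this computes 2 * sigmoid(lambda * x) - 1,
 * i.e. tanh(lambda * x / 2), squashing values into (-1, 1). */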
__global__ void kSquashRelu(float* mat, float* target, unsigned int len, float lambda) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 2 / (1 + __expf(-lambda * mat[i])) - 1;
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __expf(mat[i]);
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrt(mat[i]);
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]);
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = -mat[i] * __logf(p[i] + tiny);
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. / mat[i];
}
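/* kBesselRatioActivation: elementwise ratio of modified Bessel functions,
 * I1(x) / I0(x). The continued-fraction variant below appears to evaluate the
 * analogous ratio I_{order+1}(x) / I_{order}(x) by unrolling a continued
 * fraction truncated after num_terms terms. */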
__global__ void kBesselRatioActivation(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float r = mat[i];
target[i] = cyl_bessel_i1f(r) / cyl_bessel_i0f(r);
}
}
__global__ void kBesselRatioActivationContinuedFraction(float* mat, float* target, float order, int num_terms, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float k = mat[i];
float result = 2 * (order + num_terms) / k;
for(int j = num_terms - 1; j > 0; j--) {
result = 2 * (order + j) / k + 1 / result;
}
target[i] = 1 / result;
}
}
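/* Vector-broadcast kernels: with column-major storage, vec[i % height] selects
 * the entry of a column vector (length `height`, broadcast across columns) and
 * vec[i / height] selects the entry of a row vector (length `width`, broadcast
 * across rows). The kAddColMult/kAddRowMult variants additionally scale the
 * vector by `mult`. */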
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddToEachPixel(float* mat1, float* mat2, float* tgtMat, float mult, unsigned int width, unsigned int height, unsigned int num_pix) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat1[i] + mult * mat2[i % height + height * (i / (height * num_pix))];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kMultByRowVectorScale(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = scale_targets * tgtMat[i] + mat[i] * vec[i / height];
}
}
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = a[i] + ((b[i] > 0) ? mult : ((b[i] < 0) ? -mult : 0));
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
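/* kMult and kMultScalar below take a scale_targets argument: when it is 0 the
 * destination is simply overwritten, otherwise the result is accumulated as
 * dest = scale_targets * dest + (new value). */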
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
} else {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scale_targets * dest[i] + a[i] * b[i];
}
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
// targets[i] < 0 means "don't care": such entries contribute zero gradient.
__global__ void kLogisticGrad(float* mat, float* targets, float* out_grad, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
out_grad[i] = (targets[i] < 0) ? 0 : (mat[i] - targets[i]);
}
}
__global__ void kLogisticCorrectNormalized(float* mat, float* targets, float* out, unsigned int height, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < height) {
float correct = 0;
float total = 0;
float p, t;
for (int i = idx; i < width * height; i += height) {
p = mat[i];
t = targets[i];
correct += (t < 0) ? 0 : (((t >= 0.5 && p >= 0.5) || (t < 0.5 && p < 0.5)) ? 1: 0);
total += (t < 0) ? 0 : 1;
}
out[idx] = (total > 0) ? (correct / total) : 0;
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]);
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
} else {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = scale_targets * dest[i] + alpha * mat[i];
}
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
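/* kSelectRows: gathers rows of `source` (rows are nCols contiguous floats) into
 * `target`, 32 rows per block. Negative indices count from the end; indices
 * that are still out of range after wrapping produce target rows filled with NaN. */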
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
}
__global__ void kShuffleColumns(float* source, float* target, float* indices, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp1, temp2;
unsigned int column, row, pos1, pos2;
for (unsigned int i = idx; i < height * ((width+1) / 2); i += numThreads) {
column = 2 * (i / height);
row = i % height;
if (column + 1 >= width) {
pos1 = height * (int)indices[column] + row;
target[pos1] = source[pos1];
} else {
pos1 = height * (int)indices[column] + row;
pos2 = height * (int)indices[column + 1] + row;
temp1 = source[pos1];
temp2 = source[pos2];
target[pos2] = temp1;
target[pos1] = temp2;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
  // copy 32 rows; rows whose target index is out of range are skipped
  for (int i=0; i<localNRowIs; i++){
    const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
    if (targetRowI == -1)
      continue;
    for (int colI=tid; colI<nCols; colI+=32)
      target[targetRowI * nCols + colI] = source[sourceRowI * nCols + colI];
  }
}
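/* kBlockify: one block per column of `numdims` elements; every element within a
 * run of `blocksize` entries is replaced by the first element of that run. */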
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
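/* Softmax gradient kernels: `labels` holds the index of the correct class for
 * each example, and the gradient is the softmax output minus the corresponding
 * one-hot vector. The plain version stores examples as columns; the RowMajor
 * variants store one example per row of the column-major matrix. */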
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i / height] == i % height ? 1 : 0);
}
}
__global__ void kSoftMaxGradRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i % height] == i / height ? 1 : 0);
}
}
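/* Hinge-loss gradients (one thread per example, examples stored as rows):
 * kHingeQuadraticRowMajor uses the margin violation itself as the gradient,
 * kHingeLinearRowMajor uses its sign; the correct class receives the negative
 * sum of the other gradients. */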
__global__ void kHingeQuadraticRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? diff : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kHingeLinearRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? 1 : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
}
__global__ void kSoftMaxCrossEntropy(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
target[i] = -__logf(mat[height * i + (int)labels[i]] + tiny);
}
}
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
}
__global__ void kSoftMaxCorrect(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = (cur_argmax == (int)labels[column]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == (int)labels[row]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == labels[(int)indices[row]]) ? 1 : 0;
}
}
}
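/* kSoftMax / kSoftMaxOverwrite: softmax over each column, stabilized by
 * subtracting the column maximum before exponentiating:
 *   target[i] = exp(x[i] - max_j x[j]) / sum_j exp(x[j] - max_j x[j])
 * One block handles one column; judging from the reduceToMax32 /
 * reduceToSumLocal32 helpers, these are meant to be launched with 32 threads
 * per block and 32 floats of dynamic shared memory, e.g. (illustrative only):
 *   // kSoftMax<<<grid, 32, 32 * sizeof(float)>>>(d_mat, d_target, width, height);
 */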
__global__ void kSoftMax(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val += __expf(cur_data[i]-cur_max);
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
float *cur_target = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] = __expf(cur_data[i]-cur_max) / norm ;
}
}
}
__global__ void kSoftMaxOverwrite(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] = __expf(cur_data[i]-cur_max);
val += cur_data[i];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] /= norm;
}
}
}
__global__ void kSoftMaxRowMajor(float* mat, unsigned int width, unsigned int height, float* target) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
float *cur_target = &target[row] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_target[i * height] = __expf(cur_data[i * height]-cur_max);
val += cur_target[i * height];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_target[i * height] /= norm;
}
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] += 1;
}
}
}
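/* kChooseMaxColumnwise: writes a one-hot column into `target` marking the
 * argmax of each column of `mat` (the column is zeroed first, then the winning
 * entry is set to 1). */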
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
      target_data[i] = 0;
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] = 1;
}
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = cur_argmax;
}
}
}
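/* Column reductions: one block per column with a shared-memory tree reduction;
 * the result is blended into the existing output as
 * target[column] = p * target[column] + mult * reduction. */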
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
__global__ void kSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
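/* kSumAll: splits the array into gridDim.x nearly equal chunks (the first
 * `left_over` chunks get one extra element) and writes one partial sum per
 * block into `target`; combining the partial sums is presumably done by the
 * caller or a second pass. */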
__global__ void kSumAll(float* mat, float* target, unsigned int len, unsigned int len_per_block, unsigned int left_over) {
extern __shared__ float sum_vals[];
float cur_sum = 0;
int block_id = blockIdx.x;
mat += block_id * len_per_block + (block_id < left_over ? block_id : left_over);
int l = len_per_block + (block_id < left_over ? 1 : 0);
__syncthreads();
for (unsigned int i = threadIdx.x; i < l; i += blockDim.x) {
cur_sum += mat[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[block_id] = sum_vals[0];
}
__global__ void kSqSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[row] = p * target[row] + mult * sum_vals[0];
}
}
// kSumRowwise: one thread per row; works well when the number of rows is large.
__global__ void kSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
if (row < height) {
float sum = 0;
float *data = mat + row;
for (unsigned int i = 0; i < width; i++) sum += data[i*height];
target[row] = p * target[row] + mult * sum;
}
}
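/* kNormLimitColumnwise / kNormLimitRowwise: rescale each column (or row) so its
 * L2 norm is at most `norm`; when constraint == 1 the norm is forced to exactly
 * `norm` regardless of its current value. */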
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0] / height;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] - cur_sum;
}
__syncthreads();
}
}
__global__ void kNormLimitRowwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data[i * height] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeRowwiseBprop(float* deriv, float* input, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0, cur_sum2 = 0;
float *cur_data = &input[row] ;
float *cur_data2 = &deriv[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
float x = cur_data[i * height];
float d = cur_data2[i * height];
cur_sum += x * x;
cur_sum2 += x * d;
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float sigma_sq = sum_vals[0];
float sigma = sqrt(sigma_sq);
sum_vals[threadIdx.x] = cur_sum2;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float xd_stats = sum_vals[0] / (sigma_sq * sigma);
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data2[i * height] / sigma - cur_data[i * height] * xd_stats;
}
__syncthreads();
}
}
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < target_width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width)? source[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width2)? source[i] + mult * mat[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kAccumulateColumns(float* mat, float* indices, float* target, int mat_width, int target_width, int height, float mult, int avg){
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int column = threadIdx.x;
if (row < height && column < target_width) {
float cur_sum = 0.0;
unsigned int count = 0;
for (unsigned int i = 0; i < mat_width; i ++) {
count += ((int)indices[i] == column) ? 1 : 0 ;
cur_sum += ((int)indices[i] == column) ? mat[row + i * height] : 0 ;
}
target[row + height * column] = mult * cur_sum / ((avg == 1 && count > 0) ? count : 1);
}
}
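/* Patch extraction kernels: crop patch_width x patch_height windows from the
 * source images at per-image (width_offset, height_offset) positions, with
 * `indices` selecting which source image feeds each patch; the *2 and *3
 * variants also mirror the patch horizontally when flip[image_id] > 0.5. */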
__global__ void kExtractPatches(float* images, float* patches, float* indices, float* width_offset, float* height_offset, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
const unsigned long idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long numThreads = blockDim.x * gridDim.x;
const unsigned long total_pixels = patch_width * patch_height * num_colors * num_images;
unsigned long ind, pos;
unsigned long image_id, dest_row, dest_col, color, source_row, source_col;
for (unsigned long i = idx; i < total_pixels; i += numThreads) {
ind = i;
image_id = ind % num_images; ind /= num_images;
dest_col = ind % patch_width; ind /= patch_width;
dest_row = ind % patch_height; ind /= patch_height;
color = ind % num_colors;
source_row = int(height_offset[image_id]) + dest_row;
source_col = int(width_offset[image_id]) + dest_col;
//pos = img_width * img_height * num_colors * (int)indices[image_id] + img_width * img_height * color + img_width * source_row + source_col;
pos = source_col + img_width * (source_row + img_height * (color + num_colors * (int)indices[image_id]));
patches[i] = images[pos];
}
}
__global__ void kExtractPatches2(float* images, float* patches, float* width_offset, float* height_offset, float* flip, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
int image_id = blockIdx.z % num_images;
int color = blockIdx.z / num_images;
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long dest_index = image_id + num_images * (dest_col + patch_width * (dest_row + patch_height * color));
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
patches[dest_index] = images[source_index];
}
}
__global__ void kExtractPatches3(float* images, float* patches,
float* width_offset, float* height_offset, float* flip,
int num_images, int img_width, int img_height,
int patch_width, int patch_height, int num_colors) {
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
for (unsigned int b = blockIdx.z; b < num_colors * num_images; b += gridDim.z) {
int color = b % num_colors;
int image_id = b / num_colors;
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
unsigned long dest_index = dest_col + patch_width * (dest_row + patch_height * (color + num_colors * image_id));
patches[dest_index] = images[source_index];
}
}
}
__global__ void kCapsulify(float* images, float* output, int image_size, int crop_size, int num_images) {
unsigned int image_id = blockIdx.z;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < image_size && col < image_size && image_id < num_images) {
images += image_id * image_size * image_size;
output += image_id * image_size * image_size;
unsigned int source_index = row * image_size + col;
unsigned int capsule_id = (row / crop_size) * (image_size / crop_size) + (col / crop_size);
unsigned int within_capsule_index = (row % crop_size) * crop_size + (col % crop_size);
unsigned int dest_index = capsule_id * crop_size * crop_size + within_capsule_index;
output[dest_index] = images[source_index];
}
}
__global__ void kRectifyBoundingBox(
float* boxes, float* width_offset, float* height_offset, float* flip,
int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
}
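/* Optimizer history updates: kAdagrad keeps
 * history[i] = delta + sqrt(sum of squared gradients seen so far), and
 * kRMSProp keeps sqrt(factor * history^2 + (1 - factor) * grad^2), i.e. the
 * root of an exponential moving average of squared gradients with decay factor. */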
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
}
__global__ void kRMSProp(float *history, float *grad, float factor, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
history[i] = sqrt(factor * history[i] * history[i] + (1-factor) * grad[i] * grad[i]);
}
}
__global__ void kBoundingBoxLogisticGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const int color = blockIdx.z;
/*
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
*/
const int image_id = threadIdx.x;
const int col = blockIdx.x;
const int row = blockIdx.y;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
}
unsigned long i = image_id + size * (col + width * (row + height * color));
__syncthreads();
if (col < width && row < height && image_id < size && color < depth) {
if (num_bboxes > 0) {
grad[i] = (num_bboxes_of_this_depth_inside > 0) ? (mat[i] - 1) : 0;
} else {
grad[i] = (num_bboxes_of_this_depth > 0) ? mat[i] : 0;
}
}
}
__global__ void kLogisticCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target, float cutoff) {
const int color = blockIdx.z;
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
unsigned long i = image_id + size * (col + width * (row + height * color));
if (num_bboxes > 0) {
target[i] = (num_bboxes_of_this_depth_inside > 0 && mat[i] >= cutoff) ? 1 : 0;
} else {
target[i] = (num_bboxes_of_this_depth > 0 && mat[i] < cutoff) ? 1 : 0;
}
}
}
__global__ void kBoundingBoxSoftMaxGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const unsigned int len = width * height * depth * size;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int ind, image_id, source_depth, x1, y1, x2, y2, start,
end, src_image_id, num_bboxes, num_bboxes_of_this_depth, box_id, inside;
float source_x, source_y;
for (unsigned int i = idx; i < len; i += numThreads) {
ind = i;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
source_depth = ind % depth;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
num_bboxes_of_this_depth = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (inside == 1 && label[box_id] == source_depth) ? 1: 0;
}
grad[i] = mat[i] - ((num_bboxes > 0) ? ((float)num_bboxes_of_this_depth / num_bboxes) : (source_depth == 0 ? 1:0));
}
}
__global__ void kSoftMaxCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target) {
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int num_pixels = size * width * height;
if (row < num_pixels) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < depth; i += blockDim.x) {
val = cur_data[i * num_pixels];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
int ind, image_id, src_image_id, x1, y1, x2, y2, start,
end, num_bboxes, correct, box_id, inside;
float source_x, source_y;
ind = row;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
correct = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
correct += (inside == 1 && cur_argmax == label[box_id]) ? 1 : 0;
}
target[row] = (num_bboxes > 0) ? ((correct > 0) ? 1 : 0) : ((cur_argmax == 0) ? 1: 0);
}
}
}
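/* kLSTMFprop: fused elementwise LSTM forward step. s_out holds six contiguous
 * slabs of numcases * num_lstms floats: h, c, i, f, a, o; on entry the i/f/a/o
 * slabs hold pre-activations (the dense input/recurrent products are apparently
 * computed elsewhere), and s_in provides the previous cell state c. The kernel
 * adds the diagonal (peephole) weights and biases and applies the gates:
 *   i = sigmoid(i + c_prev * w_i + b_i)
 *   f = sigmoid(f + c_prev * w_f + b_f)
 *   a = use_relu ? relu(a + b_a) : tanh(a + b_a)
 *   c = f * c_prev + i * a
 *   o = sigmoid(o + c * w_o + b_o)
 *   h = o * (use_relu ? c : tanh(c))
 * With init set, c_prev is taken to be 0. kLSTMBprop is the matching backward
 * pass and kLSTMOutp accumulates the diagonal-weight and bias gradients. */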
__global__ void kLSTMFprop(float *s_in, float* s_out, float* w_diag, float* b, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *h_out = s_out,
*c_out = s_out + numEls,
*i_out = s_out + 2 * numEls,
*f_out = s_out + 3 * numEls,
*a_out = s_out + 4 * numEls,
*o_out = s_out + 5 * numEls;
float *c_in = s_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float *b_i = b,
*b_f = b + num_lstms,
*b_a = b + 2 * num_lstms,
*b_o = b + 3 * num_lstms;
float i, f, a, o, c, h;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
i = i_out[p];
f = f_out[p];
a = a_out[p];
o = o_out[p];
c = init ? 0 : c_in[p];
i = sigmoid(i + c * w_i[j] + b_i[j]);
f = sigmoid(f + c * w_f[j] + b_f[j]);
a = use_relu ? relu(a + b_a[j]) : tanh(a + b_a[j]);
c = c * f + i * a;
o = sigmoid(o + c * w_o[j] + b_o[j]);
h = o * (use_relu ? c : tanh(c)); // relu(c) = c, because c is always +ve here.
__syncthreads();
i_out[p] = i;
f_out[p] = f;
a_out[p] = a;
o_out[p] = o;
c_out[p] = c;
h_out[p] = h;
}
}
}
__global__ void kLSTMBprop(float *s_in, float* s_out, float* d_in, float* d_out, float* w_diag, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *s_c_out = s_out + numEls,
*s_i_out = s_out + 2 * numEls,
*s_f_out = s_out + 3 * numEls,
*s_a_out = s_out + 4 * numEls,
*s_o_out = s_out + 5 * numEls;
float *s_c_in = s_in + 1 * numEls;
float *d_h_out = d_out,
*d_c_out = d_out + numEls,
*d_i_out = d_out + 2 * numEls,
*d_f_out = d_out + 3 * numEls,
*d_a_out = d_out + 4 * numEls,
*d_o_out = d_out + 5 * numEls;
float *d_c_in = d_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float i, f, a, o, c,
grad_i, grad_f, grad_a, grad_o, grad_c, grad_h,
c_old, tanhc;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
grad_h = d_h_out[p];
grad_c = d_c_out[p];
i = s_i_out[p];
f = s_f_out[p];
a = s_a_out[p];
o = s_o_out[p];
c = s_c_out[p];
c_old = init ? 0 : s_c_in[p];
tanhc = use_relu ? c : tanh(c);
grad_o = grad_h * tanhc * deriv_of_sigmoid(o);
grad_c += grad_o * w_o[j] + grad_h * o * (use_relu ? deriv_of_relu(tanhc) : deriv_of_tanh(tanhc));
grad_a = grad_c * i * (use_relu ? deriv_of_relu(a) : deriv_of_tanh(a));
grad_i = grad_c * a * deriv_of_sigmoid(i);
grad_f = grad_c * c_old * deriv_of_sigmoid(f);
grad_c = grad_c * f + grad_f * w_f[j] + grad_i * w_i[j];
__syncthreads();
d_i_out[p] = grad_i;
d_f_out[p] = grad_f;
d_o_out[p] = grad_o;
d_a_out[p] = grad_a;
if (!init) d_c_in[p] = grad_c;
}
}
}
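// Accumulates per-unit gradients for the diagonal cell-to-gate weights (dw_diag) and the
// biases (db). One block handles one LSTM unit; threads sum over cases and the partial
// sums are combined with reduceToSumLocal32.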
__global__ void kLSTMOutp(float* s_in, float* s_out, float* d_out, float* dw_diag, float* db, int numcases, int num_lstms, bool init) {
extern __shared__ float sum_vals[];
const int lstm_id = gridDim.x * blockIdx.y + blockIdx.x;
if (lstm_id < num_lstms) {
float* d_i = d_out + numcases * (num_lstms * 2 + lstm_id);
float* d_f = d_out + numcases * (num_lstms * 3 + lstm_id);
float* d_a = d_out + numcases * (num_lstms * 4 + lstm_id);
float* d_o = d_out + numcases * (num_lstms * 5 + lstm_id);
float* s_c = s_out + numcases * (num_lstms * 1 + lstm_id);
float* s_c_old = s_in + numcases * (num_lstms * 1 + lstm_id);
float dwi = 0, dwf = 0, dwo = 0, dbi = 0, dbf = 0, dba = 0, dbo = 0;
float c_old, grad_i, grad_f, grad_a, grad_o;
for (unsigned int i = threadIdx.x; i < numcases; i += blockDim.x) {
c_old = init ? 0 : s_c_old[i];
grad_i = d_i[i];
grad_f = d_f[i];
grad_a = d_a[i];
grad_o = d_o[i];
dwi += c_old * grad_i;
dwf += c_old * grad_f;
dwo += s_c[i] * grad_o;
dbi += grad_i;
dbf += grad_f;
dba += grad_a;
dbo += grad_o;
}
    sum_vals[threadIdx.x] = dwi; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id] += sum_vals[0];
    sum_vals[threadIdx.x] = dwf; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms] += sum_vals[0];
    sum_vals[threadIdx.x] = dwo; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms * 2] += sum_vals[0];
    sum_vals[threadIdx.x] = dbi; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id] += sum_vals[0];
    sum_vals[threadIdx.x] = dbf; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms] += sum_vals[0];
    sum_vals[threadIdx.x] = dba; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 2] += sum_vals[0];
    sum_vals[threadIdx.x] = dbo; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 3] += sum_vals[0];
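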
}
}
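// Batch-normalization backprop, one block per column: computes the gradient w.r.t. the
// pre-normalization input from the incoming gradient d, the saved statistics mu / sigma
// and the scale gamma, and accumulates it into target (scaled by scale_targets).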
__global__ void kBNBprop(float* d, float* x, float* gamma, float* mu, float* sigma,
float* target, unsigned int width, unsigned int height, float scale_targets) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
float gamma_val = gamma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
float *cur_target = &target[column * height] ;
float cur_sum = 0, cur_sum2 = 0, val;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += (cur_x[i] - mu_val) * cur_d[i];
}
sum_vals[threadIdx.x] = cur_sum / ((height - 1) * sigma_val * sigma_val);
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = gamma_val * (cur_d[i] - (cur_x[i] - mu_val) * cur_sum) / sigma_val;
cur_sum2 += val;
cur_target[i] = scale_targets * cur_target[i] + val;
}
sum_vals[threadIdx.x] = cur_sum2 / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] -= cur_sum;
}
__syncthreads();
}
}
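// In-place batch-norm backprop: per column, dgamma = mean(d * y); d is then overwritten
// with d - dgamma * y minus its per-column mean.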
__global__ void kBNBpropInplace(float* d, float* y, float* dgamma, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data1 = &d[column * height];
float *cur_data2 = &y[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data1[i] * cur_data2[i];
}
sum_vals[threadIdx.x] = cur_sum / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float stat = sum_vals[0];
if (threadIdx.x == 0) dgamma[column] = stat;
__syncthreads();
cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data1[i] -= stat * cur_data2[i];
cur_sum += cur_data1[i];
}
sum_vals[threadIdx.x] = cur_sum / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
stat = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data1[i] -= stat;
}
}
}
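// Gradients of the batch-norm scale and shift: per column, dgamma = sum(z * d) and
// dbeta = sum(d), where z = (x - mu) / sigma.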
__global__ void kBNGrad(float* d, float* x, float* mu, float* sigma,
float* dgamma, float* dbeta, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
    float z, d_val, sum_gamma = 0, sum_beta = 0;
    for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
      z = (cur_x[i] - mu_val) / sigma_val;
      d_val = cur_d[i];
      sum_gamma += z * d_val;
      sum_beta += d_val;
    }
sum_vals[threadIdx.x] = sum_gamma; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dgamma[column] = sum_vals[0];
__syncthreads();
sum_vals[threadIdx.x] = sum_beta; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dbeta[column] = sum_vals[0];
__syncthreads();
}
}
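// First-timestep forward pass for the second LSTM variant. For each case, gates holds
// four consecutive blocks of num_lstms values (i, f, a, o). The previous cell state is
// zero, so only the output peephole weight w_co is used; the forget gate is squashed and
// stored but does not affect the cell.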
__global__ void kLSTMFprop2Init(float *gates, float* cell, float* output, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
cell += case_id * num_lstms;
output += case_id * num_lstms;
float w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3];
i = sigmoid(i);
f = sigmoid(f);
a = tanh(a);
float c = i * a;
o = sigmoid(o + c * w_co);
float r = tanh(c) * o;
gates[lstm_id] = i;
gates[lstm_id + num_lstms] = f;
gates[lstm_id + 2 * num_lstms] = a;
gates[lstm_id + 3 * num_lstms] = o;
cell[lstm_id] = c;
output[lstm_id] = r;
}
}
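// General-timestep forward pass: like kLSTMFprop2Init, but with a previous cell state,
// so the input and forget gates also receive peephole contributions through w_ci and w_cf.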
__global__ void kLSTMFprop2(float *gates, float* cell_prev, float* cell, float* output, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
cell_prev += case_id * num_lstms;
cell += case_id * num_lstms;
output += case_id * num_lstms;
float w_ci = w[lstm_id],
w_cf = w[lstm_id + num_lstms],
w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell_prev[lstm_id];
i = sigmoid(i + c * w_ci);
f = sigmoid(f + c * w_cf);
a = tanh(a);
c = c * f + i * a;
o = sigmoid(o + c * w_co);
float r = tanh(c) * o;
gates[lstm_id] = i;
gates[lstm_id + num_lstms] = f;
gates[lstm_id + 2 * num_lstms] = a;
gates[lstm_id + 3 * num_lstms] = o;
cell[lstm_id] = c;
output[lstm_id] = r;
}
}
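// Backward pass for the first timestep: there is no previous cell state, so the
// forget-gate gradient is zero and nothing is propagated to an earlier cell.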
__global__ void kLSTMBprop2Init(float *gates, float* gates_deriv, float* cell, float* cell_deriv,
float* output_deriv, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
gates_deriv += case_id * num_lstms * 4;
cell += case_id * num_lstms;
cell_deriv += case_id * num_lstms;
output_deriv += case_id * num_lstms;
float w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell[lstm_id],
c_d = cell_deriv[lstm_id],
r_d = output_deriv[lstm_id];
float tanhc = tanh(c);
float o_d = r_d * tanhc * deriv_of_sigmoid(o);
c_d += o * r_d * deriv_of_tanh(tanhc) + o_d * w_co;
float a_d = c_d * i * deriv_of_tanh(a);
float i_d = c_d * a * deriv_of_sigmoid(i);
gates_deriv[lstm_id] = i_d;
gates_deriv[lstm_id + num_lstms] = 0;
gates_deriv[lstm_id + 2 * num_lstms] = a_d;
gates_deriv[lstm_id + 3 * num_lstms] = o_d;
cell_deriv[lstm_id] = c_d;
}
}
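// Backward pass for a general timestep: also produces the forget-gate gradient and the
// gradient w.r.t. the previous cell state (cell_prev_deriv), including the peephole
// terms through w_ci and w_cf.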
__global__ void kLSTMBprop2(float *gates, float* gates_deriv, float* cell_prev, float* cell_prev_deriv,
float* cell, float* cell_deriv, float* output_deriv, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
gates_deriv += case_id * num_lstms * 4;
cell += case_id * num_lstms;
cell_deriv += case_id * num_lstms;
cell_prev += case_id * num_lstms;
cell_prev_deriv += case_id * num_lstms;
output_deriv += case_id * num_lstms;
float w_ci = w[lstm_id],
w_cf = w[lstm_id + num_lstms],
w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell[lstm_id],
c_d = cell_deriv[lstm_id],
c_p = cell_prev[lstm_id],
r_d = output_deriv[lstm_id];
float tanhc = tanh(c);
float o_d = r_d * tanhc * deriv_of_sigmoid(o);
c_d += o * r_d * deriv_of_tanh(tanhc) + o_d * w_co;
float a_d = c_d * i * deriv_of_tanh(a);
float i_d = c_d * a * deriv_of_sigmoid(i);
float f_d = c_d * c_p * deriv_of_sigmoid(f);
float c_p_d = c_d * f + i_d * w_ci + f_d * w_cf;
gates_deriv[lstm_id] = i_d;
gates_deriv[lstm_id + num_lstms] = f_d;
gates_deriv[lstm_id + 2 * num_lstms] = a_d;
gates_deriv[lstm_id + 3 * num_lstms] = o_d;
cell_deriv[lstm_id] = c_d;
cell_prev_deriv[lstm_id] = c_p_d;
}
}
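// Capsule activation, one block per column: computes the vector length r = ||h||, the
// scalar activation f = I1(r) / I0(r) (ratio of modified Bessel functions), stores r in
// l and f in s, and writes the rescaled vector h * f / r to output.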
__global__ void kCapsuleActivation(float* h, float* l, float* s, float* output, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
// Compute length.
float cur_sum = 0;
float *cur_data = &h[column * height];
float *cur_data_output = &output[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = sqrt(sum_vals[0]); // length.
float f = cyl_bessel_i1f(cur_sum) / cyl_bessel_i0f(cur_sum); // Apply activation.
l[column] = cur_sum;
s[column] = f;
sum_vals[0] = f / cur_sum;
}
__syncthreads();
cur_sum = sum_vals[0];
// Scale the data.
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data_output[i] = cur_data[i] * cur_sum;
}
}
}
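// Backprop through kCapsuleActivation using the saved length l and activation s, with an
// additional sparsity term. Note that the result is written back into d in place; the
// output_d argument is not referenced inside this kernel.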
__global__ void kBpropCapsuleActivation(float* d, float* y, float* l, float* s, float* output_d,
float sparsity_cost, float sparsity_scale, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0.0f;
float *cur_data_d = &d[column * height];
float *cur_data_d_output = &d[column * height];
float *cur_data_y = &y[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data_d[i] * cur_data_y[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) {
float f = s[column];
float length = l[column];
float d_f = 1.0f - f * f - f / length;
cur_sum = (sum_vals[0] / f) * (d_f / f - 1.0f / length);
cur_sum += sparsity_cost * length / (f * (sparsity_scale * sparsity_scale + length * length));
sum_vals[0] = cur_sum;
sum_vals[1] = f / length;
}
__syncthreads();
cur_sum = sum_vals[0];
float scale = sum_vals[1];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data_d_output[i] = cur_sum * cur_data_y[i] + scale * cur_data_d[i];
}
}
}
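// Samples the scalar w component of von Mises-Fisher draws with concentration kappa[i]
// in n dimensions, using per-thread multiply-with-carry RNG state (rndMults / rndWords)
// and a rejection loop that falls back to w = 1 after 100 rejected proposals.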
__global__ void kSampleVMF(unsigned int* rndMults, unsigned long long* rndWords, float* kappa, float* target, int n, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rand;
for (unsigned int i = idx; i < len; i += NUM_RND_STREAMS) {
float k = kappa[i];
float m = (n - 1.0f) / 2.0f;
float b = (-k + sqrtf(k * k + m * m)) / m;
float x = (1 - b) / (1 + b);
float c = k * x + 2 * m * __logf(1 - x * x);
float w;
float accept_val = -1.0f;
int counter = 0;
while (accept_val < 0) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float theta = rand * PI;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float s = rand;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float r = sqrtf(s);
float u = r * __cosf(theta), v = r * __sinf(theta);
float z = 0.5 + ((u * v) / s) * ((n > 2) ? sqrtf(1 - powf(s, 1.0f / (m - 0.5))) : 1.0);
w = (1 - (1 + b) * z) / (1 - (1 - b) * z);
w = (w < -1) ? -1 : ((w > 1) ? 1 : w);
accept_val = k * w + 2 * m * __logf(1 - x * w + tiny) - c - __logf(rand + tiny);
if (++counter > 100) {
w = 1.0f;
break;
}
}
__syncthreads();
target[i] = w;
}
rndWords[idx] = rndWord;
}
| d63457a3b509ae92127531d4656efe6666c746e8.cu | #include "cudamat_kernels.cuh"
#include "float.h"
template<int NUM_THREADS>
__device__ void reduceToMax(float* sdata, unsigned int tid){
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMax32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]);
}
}
template __device__ void reduceToMax<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
template<int NUM_THREADS>
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = mySum + smem[tid + 16];
smem[tid] = mySum = mySum + smem[tid + 8];
smem[tid] = mySum = mySum + smem[tid + 4];
smem[tid] = mySum = mySum + smem[tid + 2];
smem[tid] = mySum = mySum + smem[tid + 1];
}
}
/*
* tanh is predefined in CUDA.
__device__ inline float tanh(float x) {
return (1.0f - __expf(-x)) / (1.0f + __expf(-x));
}
*/
__device__ inline float relu(float x) {
return ((x > 0) ? x : 0);
}
__device__ inline float deriv_of_relu(float y) {
return ((y > 0) ? 1 : 0);
}
__device__ inline float sigmoid(float x) {
return 1.0f / (1.0f + __expf(-x));
}
__device__ inline float deriv_of_sigmoid(float y) {
return y * (1 - y);
}
__device__ inline float deriv_of_tanh(float y) {
return 1 - y*y;
}
template __device__ void reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussianDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] *= 1 + scale * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] *= 1 + scale * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob) ? (scale * gData[i]) : val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
__global__ void kTransposeBig(float *odata, float *idata, int height, int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int r, c;
for (unsigned int i = idx; i < width * height; i += numThreads) {
r = i % width;
c = i / width;
odata[i] = idata[height * r + c];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i];
}
__global__ void kLessThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] <= mat2[i];
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val;
}
__global__ void kLessThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] <= val;
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i];
}
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val;
}
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val;
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val:mat[i];
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val ? val:mat[i];
}
__global__ void kUpperBoundModScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val : (mat[i] < -val ? -val : mat[i]);
}
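// Sparse-dense matrix product: data / indptr / indices describe an m x k CSR matrix A,
// dense_data is a k x n column-major matrix B, and the m x n column-major target is
// updated as target = alpha * A * B + beta * target.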
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] ? copysignf(1., mat[i]) : 0;
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __cosf(mat[i]);
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sigmoid(mat[i]);
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __logf(mat[i] + tiny);
}
__global__ void kSquashRelu(float* mat, float* target, unsigned int len, float lambda) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 2 / (1 + __expf(-lambda * mat[i])) - 1;
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __expf(mat[i]);
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrt(mat[i]);
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]);
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = -mat[i] * __logf(p[i] + tiny);
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. / mat[i];
}
__global__ void kBesselRatioActivation(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float r = mat[i];
target[i] = cyl_bessel_i1f(r) / cyl_bessel_i0f(r);
}
}
__global__ void kBesselRatioActivationContinuedFraction(float* mat, float* target, float order, int num_terms, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float k = mat[i];
float result = 2 * (order + num_terms) / k;
for(int j = num_terms - 1; j > 0; j--) {
result = 2 * (order + j) / k + 1 / result;
}
target[i] = 1 / result;
}
}
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddToEachPixel(float* mat1, float* mat2, float* tgtMat, float mult, unsigned int width, unsigned int height, unsigned int num_pix) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat1[i] + mult * mat2[i % height + height * (i / (height * num_pix))];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kMultByRowVectorScale(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = scale_targets * tgtMat[i] + mat[i] * vec[i / height];
}
}
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = a[i] + ((b[i] > 0) ? mult : ((b[i] < 0) ? -mult : 0));
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
} else {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scale_targets * dest[i] + a[i] * b[i];
}
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
// target[i] < 0 means don't care.
__global__ void kLogisticGrad(float* mat, float* targets, float* out_grad, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
out_grad[i] = (targets[i] < 0) ? 0 : (mat[i] - targets[i]);
}
}
__global__ void kLogisticCorrectNormalized(float* mat, float* targets, float* out, unsigned int height, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < height) {
float correct = 0;
float total = 0;
float p, t;
for (int i = idx; i < width * height; i += height) {
p = mat[i];
t = targets[i];
correct += (t < 0) ? 0 : (((t >= 0.5 && p >= 0.5) || (t < 0.5 && p < 0.5)) ? 1: 0);
total += (t < 0) ? 0 : 1;
__syncthreads();
}
out[idx] = (total > 0) ? (correct / total) : 0;
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]);
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
} else {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = scale_targets * dest[i] + alpha * mat[i];
}
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
}
__global__ void kShuffleColumns(float* source, float* target, float* indices, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp1, temp2;
unsigned int column, row, pos1, pos2;
for (unsigned int i = idx; i < height * ((width+1) / 2); i += numThreads) {
column = 2 * (i / height);
row = i % height;
if (column + 1 >= width) {
pos1 = height * (int)indices[column] + row;
target[pos1] = source[pos1];
} else {
pos1 = height * (int)indices[column] + row;
pos2 = height * (int)indices[column + 1] + row;
temp1 = source[pos1];
temp2 = source[pos2];
target[pos2] = temp1;
target[pos1] = temp2;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
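// Crops a target_w x target_w window out of each source_w x source_w tile: the window is
// centered (pad = (source_w - target_w) / 2) and then shifted by the per-tile offsets
// off_x / off_y; channels are stored interleaved.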
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
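// Softmax gradient for column-major data (one case per column): subtracts the one-hot
// label vector from the softmax output. kSoftMaxGradRowMajor below is the row-major
// (one case per row) variant.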
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i / height] == i % height ? 1 : 0);
}
}
__global__ void kSoftMaxGradRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i % height] == i / height ? 1 : 0);
}
}
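// Row-major quadratic hinge gradient: for each case, every wrong class gets
// max(0, margin + score - correct_score) and the correct class gets minus the sum of
// those terms. kHingeLinearRowMajor below uses a 0/1 gradient instead.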
__global__ void kHingeQuadraticRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? diff : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kHingeLinearRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? 1 : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
}
__global__ void kSoftMaxCrossEntropy(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
target[i] = -__logf(mat[height * i + (int)labels[i]] + tiny);
}
}
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
}
__global__ void kSoftMaxCorrect(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = (cur_argmax == (int)labels[column]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == (int)labels[row]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == labels[(int)indices[row]]) ? 1 : 0;
}
}
}
__global__ void kSoftMax(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val += __expf(cur_data[i]-cur_max);
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
float *cur_target = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] = __expf(cur_data[i]-cur_max) / norm ;
}
}
}
__global__ void kSoftMaxOverwrite(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] = __expf(cur_data[i]-cur_max);
val += cur_data[i];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] /= norm;
}
}
}
__global__ void kSoftMaxRowMajor(float* mat, unsigned int width, unsigned int height, float* target) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
float *cur_target = &target[row] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_target[i * height] = __expf(cur_data[i * height]-cur_max);
val += cur_target[i * height];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_target[i * height] /= norm;
}
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] += 1;
}
}
}
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
    for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
      val = cur_data[i];
      target_data[i] = 0;  // clear this column of the target before the argmax entry is set to 1
      if (val > cur_max) {
        cur_max = val;
        cur_argmax = i;
      }
    }
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] = 1;
}
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = cur_argmax;
}
}
}
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
__global__ void kSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
__global__ void kSumAll(float* mat, float* target, unsigned int len, unsigned int len_per_block, unsigned int left_over) {
extern __shared__ float sum_vals[];
float cur_sum = 0;
int block_id = blockIdx.x;
mat += block_id * len_per_block + (block_id < left_over ? block_id : left_over);
int l = len_per_block + (block_id < left_over ? 1 : 0);
__syncthreads();
for (unsigned int i = threadIdx.x; i < l; i += blockDim.x) {
cur_sum += mat[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[block_id] = sum_vals[0];
}
__global__ void kSqSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[row] = p * target[row] + mult * sum_vals[0];
}
}
// Works well when number of rows is large.
__global__ void kSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
if (row < height) {
float sum = 0;
float *data = mat + row;
for (unsigned int i = 0; i < width; i++) sum += data[i*height];
__syncthreads();
target[row] = p * target[row] + mult * sum;
}
}
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0] / height;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] - cur_sum;
}
__syncthreads();
}
}
__global__ void kNormLimitRowwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data[i * height] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeRowwiseBprop(float* deriv, float* input, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0, cur_sum2 = 0;
float *cur_data = &input[row] ;
float *cur_data2 = &deriv[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
float x = cur_data[i * height];
float d = cur_data2[i * height];
cur_sum += x * x;
cur_sum2 += x * d;
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float sigma_sq = sum_vals[0];
float sigma = sqrt(sigma_sq);
sum_vals[threadIdx.x] = cur_sum2;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float xd_stats = sum_vals[0] / (sigma_sq * sigma);
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data2[i * height] / sigma - cur_data[i * height] * xd_stats;
}
__syncthreads();
}
}
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < target_width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width)? source[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width2)? source[i] + mult * mat[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kAccumulateColumns(float* mat, float* indices, float* target, int mat_width, int target_width, int height, float mult, int avg){
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int column = threadIdx.x;
if (row < height && column < target_width) {
float cur_sum = 0.0;
unsigned int count = 0;
for (unsigned int i = 0; i < mat_width; i ++) {
count += ((int)indices[i] == column) ? 1 : 0 ;
cur_sum += ((int)indices[i] == column) ? mat[row + i * height] : 0 ;
}
target[row + height * column] = mult * cur_sum / ((avg == 1 && count > 0) ? count : 1);
}
}
__global__ void kExtractPatches(float* images, float* patches, float* indices, float* width_offset, float* height_offset, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
const unsigned long idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long numThreads = blockDim.x * gridDim.x;
const unsigned long total_pixels = patch_width * patch_height * num_colors * num_images;
unsigned long ind, pos;
unsigned long image_id, dest_row, dest_col, color, source_row, source_col;
for (unsigned long i = idx; i < total_pixels; i += numThreads) {
ind = i;
image_id = ind % num_images; ind /= num_images;
dest_col = ind % patch_width; ind /= patch_width;
dest_row = ind % patch_height; ind /= patch_height;
color = ind % num_colors;
source_row = int(height_offset[image_id]) + dest_row;
source_col = int(width_offset[image_id]) + dest_col;
//pos = img_width * img_height * num_colors * (int)indices[image_id] + img_width * img_height * color + img_width * source_row + source_col;
pos = source_col + img_width * (source_row + img_height * (color + num_colors * (int)indices[image_id]));
patches[i] = images[pos];
}
}
__global__ void kExtractPatches2(float* images, float* patches, float* width_offset, float* height_offset, float* flip, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
int image_id = blockIdx.z % num_images;
int color = blockIdx.z / num_images;
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long dest_index = image_id + num_images * (dest_col + patch_width * (dest_row + patch_height * color));
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
patches[dest_index] = images[source_index];
}
}
__global__ void kExtractPatches3(float* images, float* patches,
float* width_offset, float* height_offset, float* flip,
int num_images, int img_width, int img_height,
int patch_width, int patch_height, int num_colors) {
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
for (unsigned int b = blockIdx.z; b < num_colors * num_images; b += gridDim.z) {
int color = b % num_colors;
int image_id = b / num_colors;
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
unsigned long dest_index = dest_col + patch_width * (dest_row + patch_height * (color + num_colors * image_id));
__syncthreads();
patches[dest_index] = images[source_index];
}
}
}
__global__ void kCapsulify(float* images, float* output, int image_size, int crop_size, int num_images) {
unsigned int image_id = blockIdx.z;
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < image_size && col < image_size && image_id < num_images) {
images += image_id * image_size * image_size;
output += image_id * image_size * image_size;
unsigned int source_index = row * image_size + col;
unsigned int capsule_id = (row / crop_size) * (image_size / crop_size) + (col / crop_size);
unsigned int within_capsule_index = (row % crop_size) * crop_size + (col % crop_size);
unsigned int dest_index = capsule_id * crop_size * crop_size + within_capsule_index;
output[dest_index] = images[source_index];
}
}
__global__ void kRectifyBoundingBox(
float* boxes, float* width_offset, float* height_offset, float* flip,
int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
}
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
}
__global__ void kRMSProp(float *history, float *grad, float factor, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
history[i] = sqrt(factor * history[i] * history[i] + (1-factor) * grad[i] * grad[i]);
}
}
__global__ void kBoundingBoxLogisticGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const int color = blockIdx.z;
/*
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
*/
const int image_id = threadIdx.x;
const int col = blockIdx.x;
const int row = blockIdx.y;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
}
unsigned long i = image_id + size * (col + width * (row + height * color));
__syncthreads();
if (col < width && row < height && image_id < size && color < depth) {
if (num_bboxes > 0) {
grad[i] = (num_bboxes_of_this_depth_inside > 0) ? (mat[i] - 1) : 0;
} else {
grad[i] = (num_bboxes_of_this_depth > 0) ? mat[i] : 0;
}
}
}
__global__ void kLogisticCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target, float cutoff) {
const int color = blockIdx.z;
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
unsigned long i = image_id + size * (col + width * (row + height * color));
if (num_bboxes > 0) {
target[i] = (num_bboxes_of_this_depth_inside > 0 && mat[i] >= cutoff) ? 1 : 0;
} else {
target[i] = (num_bboxes_of_this_depth > 0 && mat[i] < cutoff) ? 1 : 0;
}
}
}
__global__ void kBoundingBoxSoftMaxGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const unsigned int len = width * height * depth * size;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int ind, image_id, source_depth, x1, y1, x2, y2, start,
end, src_image_id, num_bboxes, num_bboxes_of_this_depth, box_id, inside;
float source_x, source_y;
for (unsigned int i = idx; i < len; i += numThreads) {
ind = i;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
source_depth = ind % depth;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
num_bboxes_of_this_depth = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (inside == 1 && label[box_id] == source_depth) ? 1: 0;
}
grad[i] = mat[i] - ((num_bboxes > 0) ? ((float)num_bboxes_of_this_depth / num_bboxes) : (source_depth == 0 ? 1:0));
}
}
__global__ void kSoftMaxCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target) {
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int num_pixels = size * width * height;
if (row < num_pixels) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < depth; i += blockDim.x) {
val = cur_data[i * num_pixels];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
int ind, image_id, src_image_id, x1, y1, x2, y2, start,
end, num_bboxes, correct, box_id, inside;
float source_x, source_y;
ind = row;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
correct = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
correct += (inside == 1 && cur_argmax == label[box_id]) ? 1 : 0;
}
target[row] = (num_bboxes > 0) ? ((correct > 0) ? 1 : 0) : ((cur_argmax == 0) ? 1: 0);
}
}
}
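// Note on helpers (comment added; not in the original): the LSTM and batch-norm kernels
// below rely on small device functions such as sigmoid, relu, deriv_of_sigmoid,
// deriv_of_tanh, deriv_of_relu, reduceToSumLocal32 and reduceToMax32, assumed to be
// defined earlier in this file or in an included header (not shown here). Their usage is
// consistent with the usual forms, e.g. sigmoid(x) = 1/(1+exp(-x)), and the derivative
// helpers take already-activated values: deriv_of_sigmoid(y) = y*(1-y),
// deriv_of_tanh(y) = 1-y*y.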
__global__ void kLSTMFprop(float *s_in, float* s_out, float* w_diag, float* b, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *h_out = s_out,
*c_out = s_out + numEls,
*i_out = s_out + 2 * numEls,
*f_out = s_out + 3 * numEls,
*a_out = s_out + 4 * numEls,
*o_out = s_out + 5 * numEls;
float *c_in = s_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float *b_i = b,
*b_f = b + num_lstms,
*b_a = b + 2 * num_lstms,
*b_o = b + 3 * num_lstms;
float i, f, a, o, c, h;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
i = i_out[p];
f = f_out[p];
a = a_out[p];
o = o_out[p];
c = init ? 0 : c_in[p];
i = sigmoid(i + c * w_i[j] + b_i[j]);
f = sigmoid(f + c * w_f[j] + b_f[j]);
a = use_relu ? relu(a + b_a[j]) : tanh(a + b_a[j]);
c = c * f + i * a;
o = sigmoid(o + c * w_o[j] + b_o[j]);
h = o * (use_relu ? c : tanh(c)); // relu(c) = c, because c is always +ve here.
__syncthreads();
i_out[p] = i;
f_out[p] = f;
a_out[p] = a;
o_out[p] = o;
c_out[p] = c;
h_out[p] = h;
}
}
}
__global__ void kLSTMBprop(float *s_in, float* s_out, float* d_in, float* d_out, float* w_diag, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *s_c_out = s_out + numEls,
*s_i_out = s_out + 2 * numEls,
*s_f_out = s_out + 3 * numEls,
*s_a_out = s_out + 4 * numEls,
*s_o_out = s_out + 5 * numEls;
float *s_c_in = s_in + 1 * numEls;
float *d_h_out = d_out,
*d_c_out = d_out + numEls,
*d_i_out = d_out + 2 * numEls,
*d_f_out = d_out + 3 * numEls,
*d_a_out = d_out + 4 * numEls,
*d_o_out = d_out + 5 * numEls;
float *d_c_in = d_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float i, f, a, o, c,
grad_i, grad_f, grad_a, grad_o, grad_c, grad_h,
c_old, tanhc;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
grad_h = d_h_out[p];
grad_c = d_c_out[p];
i = s_i_out[p];
f = s_f_out[p];
a = s_a_out[p];
o = s_o_out[p];
c = s_c_out[p];
c_old = init ? 0 : s_c_in[p];
tanhc = use_relu ? c : tanh(c);
grad_o = grad_h * tanhc * deriv_of_sigmoid(o);
grad_c += grad_o * w_o[j] + grad_h * o * (use_relu ? deriv_of_relu(tanhc) : deriv_of_tanh(tanhc));
grad_a = grad_c * i * (use_relu ? deriv_of_relu(a) : deriv_of_tanh(a));
grad_i = grad_c * a * deriv_of_sigmoid(i);
grad_f = grad_c * c_old * deriv_of_sigmoid(f);
grad_c = grad_c * f + grad_f * w_f[j] + grad_i * w_i[j];
__syncthreads();
d_i_out[p] = grad_i;
d_f_out[p] = grad_f;
d_o_out[p] = grad_o;
d_a_out[p] = grad_a;
if (!init) d_c_in[p] = grad_c;
}
}
}
__global__ void kLSTMOutp(float* s_in, float* s_out, float* d_out, float* dw_diag, float* db, int numcases, int num_lstms, bool init) {
extern __shared__ float sum_vals[];
const int lstm_id = gridDim.x * blockIdx.y + blockIdx.x;
if (lstm_id < num_lstms) {
float* d_i = d_out + numcases * (num_lstms * 2 + lstm_id);
float* d_f = d_out + numcases * (num_lstms * 3 + lstm_id);
float* d_a = d_out + numcases * (num_lstms * 4 + lstm_id);
float* d_o = d_out + numcases * (num_lstms * 5 + lstm_id);
float* s_c = s_out + numcases * (num_lstms * 1 + lstm_id);
float* s_c_old = s_in + numcases * (num_lstms * 1 + lstm_id);
float dwi = 0, dwf = 0, dwo = 0, dbi = 0, dbf = 0, dba = 0, dbo = 0;
float c_old, grad_i, grad_f, grad_a, grad_o;
for (unsigned int i = threadIdx.x; i < numcases; i += blockDim.x) {
c_old = init ? 0 : s_c_old[i];
grad_i = d_i[i];
grad_f = d_f[i];
grad_a = d_a[i];
grad_o = d_o[i];
dwi += c_old * grad_i;
dwf += c_old * grad_f;
dwo += s_c[i] * grad_o;
dbi += grad_i;
dbf += grad_f;
dba += grad_a;
dbo += grad_o;
}
sum_vals[threadIdx.x] = dwi;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id] += sum_vals[0];
sum_vals[threadIdx.x] = dwf;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms] += sum_vals[0];
sum_vals[threadIdx.x] = dwo;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms * 2] += sum_vals[0];
sum_vals[threadIdx.x] = dbi;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id] += sum_vals[0];
sum_vals[threadIdx.x] = dbf;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms] += sum_vals[0];
sum_vals[threadIdx.x] = dba;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 2] += sum_vals[0];
sum_vals[threadIdx.x] = dbo;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 3] += sum_vals[0];
}
}
__global__ void kBNBprop(float* d, float* x, float* gamma, float* mu, float* sigma,
float* target, unsigned int width, unsigned int height, float scale_targets) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
float gamma_val = gamma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
float *cur_target = &target[column * height] ;
float cur_sum = 0, cur_sum2 = 0, val;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += (cur_x[i] - mu_val) * cur_d[i];
}
sum_vals[threadIdx.x] = cur_sum / ((height - 1) * sigma_val * sigma_val);
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = gamma_val * (cur_d[i] - (cur_x[i] - mu_val) * cur_sum) / sigma_val;
cur_sum2 += val;
cur_target[i] = scale_targets * cur_target[i] + val;
}
sum_vals[threadIdx.x] = cur_sum2 / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] -= cur_sum;
}
__syncthreads();
}
}
__global__ void kBNBpropInplace(float* d, float* y, float* dgamma, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data1 = &d[column * height];
float *cur_data2 = &y[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data1[i] * cur_data2[i];
}
sum_vals[threadIdx.x] = cur_sum / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
float stat = sum_vals[0];
if (threadIdx.x == 0) dgamma[column] = stat;
__syncthreads();
cur_sum = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data1[i] -= stat * cur_data2[i];
cur_sum += cur_data1[i];
}
sum_vals[threadIdx.x] = cur_sum / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
stat = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data1[i] -= stat;
}
}
}
__global__ void kBNGrad(float* d, float* x, float* mu, float* sigma,
float* dgamma, float* dbeta, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
    float z, dval, sum_gamma = 0, sum_beta = 0;  // "dval": renamed so the local no longer shadows the kernel argument d
    for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
      z = (cur_x[i] - mu_val) / sigma_val;
      dval = cur_d[i];
      sum_gamma += z * dval;
      sum_beta += dval;
    }
sum_vals[threadIdx.x] = sum_gamma; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dgamma[column] = sum_vals[0];
__syncthreads();
sum_vals[threadIdx.x] = sum_beta; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dbeta[column] = sum_vals[0];
__syncthreads();
}
}
__global__ void kLSTMFprop2Init(float *gates, float* cell, float* output, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
cell += case_id * num_lstms;
output += case_id * num_lstms;
float w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3];
i = sigmoid(i);
f = sigmoid(f);
a = tanh(a);
float c = i * a;
o = sigmoid(o + c * w_co);
float r = tanh(c) * o;
gates[lstm_id] = i;
gates[lstm_id + num_lstms] = f;
gates[lstm_id + 2 * num_lstms] = a;
gates[lstm_id + 3 * num_lstms] = o;
cell[lstm_id] = c;
output[lstm_id] = r;
}
}
__global__ void kLSTMFprop2(float *gates, float* cell_prev, float* cell, float* output, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
cell_prev += case_id * num_lstms;
cell += case_id * num_lstms;
output += case_id * num_lstms;
float w_ci = w[lstm_id],
w_cf = w[lstm_id + num_lstms],
w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell_prev[lstm_id];
i = sigmoid(i + c * w_ci);
f = sigmoid(f + c * w_cf);
a = tanh(a);
c = c * f + i * a;
o = sigmoid(o + c * w_co);
float r = tanh(c) * o;
gates[lstm_id] = i;
gates[lstm_id + num_lstms] = f;
gates[lstm_id + 2 * num_lstms] = a;
gates[lstm_id + 3 * num_lstms] = o;
cell[lstm_id] = c;
output[lstm_id] = r;
}
}
__global__ void kLSTMBprop2Init(float *gates, float* gates_deriv, float* cell, float* cell_deriv,
float* output_deriv, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
gates_deriv += case_id * num_lstms * 4;
cell += case_id * num_lstms;
cell_deriv += case_id * num_lstms;
output_deriv += case_id * num_lstms;
float w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell[lstm_id],
c_d = cell_deriv[lstm_id],
r_d = output_deriv[lstm_id];
float tanhc = tanh(c);
float o_d = r_d * tanhc * deriv_of_sigmoid(o);
c_d += o * r_d * deriv_of_tanh(tanhc) + o_d * w_co;
float a_d = c_d * i * deriv_of_tanh(a);
float i_d = c_d * a * deriv_of_sigmoid(i);
gates_deriv[lstm_id] = i_d;
gates_deriv[lstm_id + num_lstms] = 0;
gates_deriv[lstm_id + 2 * num_lstms] = a_d;
gates_deriv[lstm_id + 3 * num_lstms] = o_d;
cell_deriv[lstm_id] = c_d;
}
}
__global__ void kLSTMBprop2(float *gates, float* gates_deriv, float* cell_prev, float* cell_prev_deriv,
float* cell, float* cell_deriv, float* output_deriv, float* w, int num_lstms, int num_cases) {
const unsigned int case_id = blockIdx.y;
const unsigned int lstm_id = blockIdx.x * blockDim.x + threadIdx.x;
if (lstm_id < num_lstms && case_id < num_cases) {
gates += case_id * num_lstms * 4;
gates_deriv += case_id * num_lstms * 4;
cell += case_id * num_lstms;
cell_deriv += case_id * num_lstms;
cell_prev += case_id * num_lstms;
cell_prev_deriv += case_id * num_lstms;
output_deriv += case_id * num_lstms;
float w_ci = w[lstm_id],
w_cf = w[lstm_id + num_lstms],
w_co = w[lstm_id + 2 * num_lstms];
float i = gates[lstm_id],
f = gates[lstm_id + num_lstms],
a = gates[lstm_id + num_lstms * 2],
o = gates[lstm_id + num_lstms * 3],
c = cell[lstm_id],
c_d = cell_deriv[lstm_id],
c_p = cell_prev[lstm_id],
r_d = output_deriv[lstm_id];
float tanhc = tanh(c);
float o_d = r_d * tanhc * deriv_of_sigmoid(o);
c_d += o * r_d * deriv_of_tanh(tanhc) + o_d * w_co;
float a_d = c_d * i * deriv_of_tanh(a);
float i_d = c_d * a * deriv_of_sigmoid(i);
float f_d = c_d * c_p * deriv_of_sigmoid(f);
float c_p_d = c_d * f + i_d * w_ci + f_d * w_cf;
gates_deriv[lstm_id] = i_d;
gates_deriv[lstm_id + num_lstms] = f_d;
gates_deriv[lstm_id + 2 * num_lstms] = a_d;
gates_deriv[lstm_id + 3 * num_lstms] = o_d;
cell_deriv[lstm_id] = c_d;
cell_prev_deriv[lstm_id] = c_p_d;
}
}
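// kCapsuleActivation below squashes each capsule (one column of h) by its length:
// cyl_bessel_i0f / cyl_bessel_i1f are the single-precision modified Bessel functions of
// the first kind from the CUDA math library, and the ratio I1(r)/I0(r) maps the length r
// monotonically into (0, 1). (Comment added for clarity; not in the original.)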
__global__ void kCapsuleActivation(float* h, float* l, float* s, float* output, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
// Compute length.
float cur_sum = 0;
float *cur_data = &h[column * height];
float *cur_data_output = &output[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) {
cur_sum = sqrt(sum_vals[0]); // length.
float f = cyl_bessel_i1f(cur_sum) / cyl_bessel_i0f(cur_sum); // Apply activation.
l[column] = cur_sum;
s[column] = f;
sum_vals[0] = f / cur_sum;
}
__syncthreads();
cur_sum = sum_vals[0];
// Scale the data.
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data_output[i] = cur_data[i] * cur_sum;
}
}
}
__global__ void kBpropCapsuleActivation(float* d, float* y, float* l, float* s, float* output_d,
float sparsity_cost, float sparsity_scale, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0.0f;
float *cur_data_d = &d[column * height];
    float *cur_data_d_output = &output_d[column * height];  // write the result through output_d (previously this aliased d and left output_d unused)
float *cur_data_y = &y[column * height];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data_d[i] * cur_data_y[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) {
float f = s[column];
float length = l[column];
float d_f = 1.0f - f * f - f / length;
cur_sum = (sum_vals[0] / f) * (d_f / f - 1.0f / length);
cur_sum += sparsity_cost * length / (f * (sparsity_scale * sparsity_scale + length * length));
sum_vals[0] = cur_sum;
sum_vals[1] = f / length;
}
__syncthreads();
cur_sum = sum_vals[0];
float scale = sum_vals[1];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data_d_output[i] = cur_sum * cur_data_y[i] + scale * cur_data_d[i];
}
}
}
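// kSampleVMF below draws, for each element, the cosine w of the angle between a sample
// and the mean direction of an n-dimensional von Mises-Fisher distribution with
// concentration kappa[i], using a rejection scheme in the style of Wood (1994): the
// envelope parameters b, x and c are computed before the loop, and the sampler bails out
// with w = 1 after 100 rejected proposals. (Comment added for clarity; not in the original.)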
__global__ void kSampleVMF(unsigned int* rndMults, unsigned long long* rndWords, float* kappa, float* target, int n, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rand;
for (unsigned int i = idx; i < len; i += NUM_RND_STREAMS) {
float k = kappa[i];
float m = (n - 1.0f) / 2.0f;
float b = (-k + sqrtf(k * k + m * m)) / m;
float x = (1 - b) / (1 + b);
float c = k * x + 2 * m * __logf(1 - x * x);
float w;
float accept_val = -1.0f;
int counter = 0;
while (accept_val < 0) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float theta = rand * PI;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float s = rand;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rand = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
float r = sqrtf(s);
float u = r * __cosf(theta), v = r * __sinf(theta);
float z = 0.5 + ((u * v) / s) * ((n > 2) ? sqrtf(1 - powf(s, 1.0f / (m - 0.5))) : 1.0);
w = (1 - (1 + b) * z) / (1 - (1 - b) * z);
w = (w < -1) ? -1 : ((w > 1) ? 1 : w);
accept_val = k * w + 2 * m * __logf(1 - x * w + tiny) - c - __logf(rand + tiny);
if (++counter > 100) {
w = 1.0f;
break;
}
}
__syncthreads();
target[i] = w;
}
rndWords[idx] = rndWord;
}
|
100f7ddafce10989cfa52b3aeeb2db6a94915dad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define epsilon (float)1e-5
// Thread block size
#define NB 32
// Forward declaration
void randomInit (float*, int);
void MatMul_cpu (const float *, const float *, float *, int );
void MatMul_gpu (const float *, const float *, float *, int );
__global__ void MatMul_kernel(float *, float *, float *, int);
int main(int argc, char** argv) {
// Matrix dimensions: N x N
// Matrix dimensions are assumed to be multiples of NB
int N = 32*NB;
// matrices on the host
float *h_A, *h_B;
// results on host
float *cpu_result;
float *gpu_result;
// size in bytes
size_t size = N*N * sizeof(float);
  // allocate matrices on the host (size is already a byte count: N*N*sizeof(float))
  h_A = (float *) malloc(size);
  h_B = (float *) malloc(size);
// init matrices
randomInit(h_A, N*N);
randomInit(h_B, N*N);
  // allocate matrices to compare the results CPU/GPU (size is already in bytes)
  cpu_result = (float *) malloc(size);
  gpu_result = (float *) malloc(size);
// compute on GPU
MatMul_gpu (h_A, h_B, gpu_result, N);
// compute on CPU
MatMul_cpu (h_A, h_B, cpu_result, N);
// check results
int error = 0;
for(int i=0; i<N*N; i++) {
float cpu_value = cpu_result[i];
if(fabs(cpu_value - gpu_result[i])> epsilon*cpu_value)
error++;
}
if(error==0)
printf("\nTEST PASSED\n");
else
printf("\n\nTEST FAILED: number of errors: %d\n", error);
free(h_A);
free(h_B);
free(cpu_result);
free(gpu_result);
}
// Matrices are stored in row-major order:
// M(row, col) = *(M + row * N + col)
__device__ int get_offset (int idx_i, int idx_j, int N) {
return idx_i * N * NB + idx_j * NB;
}
void MatMul_gpu(const float *h_A, const float *h_B, float *h_C, int N) {
hipEvent_t start, stop;
size_t size = N*N * sizeof(float);
float *d_A, *d_B, *d_C;
// Load A and B to device memory
hipMalloc((void **)&d_A, size);
hipMalloc((void **)&d_B, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Allocate C in device memory
hipMalloc((void **)&d_C, size);
// Grid specify
dim3 dimBlock (NB, NB);
dim3 dimGrid (N / dimBlock.x, N / dimBlock.x);
hipEventCreate(&start);
hipEventCreate(&stop);
// Start timing
hipEventRecord(start);
// Invoke kernel
hipLaunchKernelGGL(( MatMul_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
// End timing
hipEventRecord(stop);
hipEventSynchronize(stop);
float gpu_time;
hipEventElapsedTime(&gpu_time, start, stop);
double time_sec = gpu_time/1000.0;
double num_ops = 2.0 * (double) N * (double) N * (double) N;
double gflops = 1.0e-9 * num_ops/time_sec;
printf("CUDA Gflops = %.4f , Time = %.5f s dim=%d\n", gflops, time_sec, N);
// Read C from device memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
// Matrix multiplication kernel called by MatMul_gpu()
__global__ void MatMul_kernel(float *A, float *B, float *C, int N) {
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[NB][NB];
__shared__ float Bs[NB][NB];
// Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
// Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (N / NB); ++kb) {
// Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension NB x NB)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset (ib, kb, N);
// Get the starting address (b_offset) of Bsub
b_offset = get_offset (kb, jb, N);
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
// Multiply As and Bs together
for (int k = 0; k < NB; ++k) {
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
c_offset = get_offset (ib, jb, N);
// Each thread block computes one sub-matrix Csub of C
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
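// --------------------------------------------------------------------------------- //
// Reference sketch (added; NOT part of the original exercise): one possible way to
// fill in the "INSERT CUDA CODE" placeholders of MatMul_kernel above. It reuses the
// same tiling scheme and the get_offset helper, and it is never called by main().
// --------------------------------------------------------------------------------- //
__global__ void MatMul_kernel_reference(float *A, float *B, float *C, int N) {
  __shared__ float As[NB][NB];
  __shared__ float Bs[NB][NB];
  int ib = blockIdx.y;
  int jb = blockIdx.x;
  int it = threadIdx.y;
  int jt = threadIdx.x;
  float Cvalue = 0.0f;
  for (int kb = 0; kb < (N / NB); ++kb) {
    int a_offset = get_offset(ib, kb, N);
    int b_offset = get_offset(kb, jb, N);
    // each thread loads one element of Asub and one element of Bsub
    As[it][jt] = A[a_offset + it * N + jt];
    Bs[it][jt] = B[b_offset + it * N + jt];
    // wait until the whole tile is resident in shared memory
    __syncthreads();
    // accumulate the partial dot product contributed by this pair of tiles
    for (int k = 0; k < NB; ++k) {
      Cvalue += As[it][k] * Bs[k][jt];
    }
    // make sure every thread is done with the tile before it is overwritten
    __syncthreads();
  }
  int c_offset = get_offset(ib, jb, N);
  // each thread writes its single element of Csub
  C[c_offset + it * N + jt] = Cvalue;
}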
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void MatMul_cpu (const float *A, const float *B, float *C, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
float value = 0.0f;
for (int k = 0; k < N; k++) {
value += A[i*N+k] * B[k*N+j];
}
C[i*N + j] = value;
}
}
}
| 100f7ddafce10989cfa52b3aeeb2db6a94915dad.cu | #include <stdio.h>
#define epsilon (float)1e-5
// Thread block size
#define NB 32
// Forward declaration
void randomInit (float*, int);
void MatMul_cpu (const float *, const float *, float *, int );
void MatMul_gpu (const float *, const float *, float *, int );
__global__ void MatMul_kernel(float *, float *, float *, int);
int main(int argc, char** argv) {
// Matrix dimensions: N x N
// Matrix dimensions are assumed to be multiples of NB
int N = 32*NB;
// matrices on the host
float *h_A, *h_B;
// results on host
float *cpu_result;
float *gpu_result;
// size in bytes
size_t size = N*N * sizeof(float);
  // allocate matrices on the host (size is already a byte count: N*N*sizeof(float))
  h_A = (float *) malloc(size);
  h_B = (float *) malloc(size);
// init matrices
randomInit(h_A, N*N);
randomInit(h_B, N*N);
  // allocate matrices to compare the results CPU/GPU (size is already in bytes)
  cpu_result = (float *) malloc(size);
  gpu_result = (float *) malloc(size);
// compute on GPU
MatMul_gpu (h_A, h_B, gpu_result, N);
// compute on CPU
MatMul_cpu (h_A, h_B, cpu_result, N);
// check results
int error = 0;
for(int i=0; i<N*N; i++) {
float cpu_value = cpu_result[i];
if(fabs(cpu_value - gpu_result[i])> epsilon*cpu_value)
error++;
}
if(error==0)
printf("\nTEST PASSED\n");
else
printf("\n\nTEST FAILED: number of errors: %d\n", error);
free(h_A);
free(h_B);
free(cpu_result);
free(gpu_result);
}
// Matrices are stored in row-major order:
// M(row, col) = *(M + row * N + col)
__device__ int get_offset (int idx_i, int idx_j, int N) {
return idx_i * N * NB + idx_j * NB;
}
void MatMul_gpu(const float *h_A, const float *h_B, float *h_C, int N) {
cudaEvent_t start, stop;
size_t size = N*N * sizeof(float);
float *d_A, *d_B, *d_C;
// Load A and B to device memory
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
cudaMalloc((void **)&d_C, size);
// Grid specify
dim3 dimBlock (NB, NB);
dim3 dimGrid (N / dimBlock.x, N / dimBlock.x);
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start timing
cudaEventRecord(start);
// Invoke kernel
MatMul_kernel <<<dimGrid, dimBlock>>> (d_A, d_B, d_C, N);
// End timing
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float gpu_time;
cudaEventElapsedTime(&gpu_time, start, stop);
double time_sec = gpu_time/1000.0;
double num_ops = 2.0 * (double) N * (double) N * (double) N;
double gflops = 1.0e-9 * num_ops/time_sec;
printf("CUDA Gflops = %.4f , Time = %.5f s dim=%d\n", gflops, time_sec, N);
// Read C from device memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
// Matrix multiplication kernel called by MatMul_gpu()
__global__ void MatMul_kernel(float *A, float *B, float *C, int N) {
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[NB][NB];
__shared__ float Bs[NB][NB];
// Block row and column
int ib = blockIdx.y;
int jb = blockIdx.x;
// Thread row and column within Csub
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0.0f;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub.
// Multiply each pair of sub-matrices together
// and accumulate the results.
for (int kb = 0; kb < (N / NB); ++kb) {
// Get the starting address (a_offset) of Asub
// (sub-matrix of A of dimension NB x NB)
// Asub is located i_block sub-matrices to the right and
// k_block sub-matrices down from the upper-left corner of A
a_offset = get_offset (ib, kb, N);
// Get the starting address (b_offset) of Bsub
b_offset = get_offset (kb, jb, N);
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
// Multiply As and Bs together
for (int k = 0; k < NB; ++k) {
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
c_offset = get_offset (ib, jb, N);
// Each thread block computes one sub-matrix Csub of C
// ---------------- //
// INSERT CUDA CODE //
// ---------------- //
}
void randomInit(float* data, int size) {
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void MatMul_cpu (const float *A, const float *B, float *C, int N) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
float value = 0.0f;
for (int k = 0; k < N; k++) {
value += A[i*N+k] * B[k*N+j];
}
C[i*N + j] = value;
}
}
}
|
5345cb41edf77986ff67436e67f6b30fce8613db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <time.h>
#include <math.h>
#include <random>
namespace lesson4 {
using namespace std;
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
struct FooBar {
int foo;
float bar;
};
__constant__ __device__ int dScalar;
__constant__ __device__ FooBar dFooBar;
const int FooBarsCount = 5;
__constant__ __device__ FooBar dFooBars[FooBarsCount];
__global__ void kernelScalar()
{
printf("Device scalar: %d\n", dScalar);
}
void runScalar()
{
int hScalarValue = 5;
checkCudaErrors(hipMemcpyToSymbol(static_cast<const void*>(&dScalar), static_cast<const void*>(&hScalarValue), 1 * sizeof(int)));
kernelScalar << <1, 1 >> > ();
int hScalarValue2 = -1;
checkCudaErrors(hipMemcpyFromSymbol(static_cast<void*>(&hScalarValue2), static_cast<const void*>(&dScalar), 1 * sizeof(int)));
printf("Scalar: %d -> %d\n", hScalarValue, hScalarValue2);
}
__global__ void kernelStruct()
{
printf("Device struct: (%d, %f)\n", dFooBar.foo, dFooBar.bar);
}
void runStruct()
{
FooBar hFooBarValue{ 42, 0.42f };
checkCudaErrors(hipMemcpyToSymbol(static_cast<const void*>(&dFooBar), static_cast<const void*>(&hFooBarValue), 1 * sizeof(FooBar)));
kernelStruct << <1, 1 >> > ();
FooBar hFooBarValue2;
checkCudaErrors(hipMemcpyFromSymbol(static_cast<void*>(&hFooBarValue2), static_cast<const void*>(&dFooBar), 1 * sizeof(FooBar)));
printf("Struct (%d, %f) -> (%d, %f)\n", hFooBarValue.foo, hFooBarValue.bar, hFooBarValue2.foo, hFooBarValue2.bar);
}
__global__ void kernelArrayOfStructs()
{
int i = threadIdx.x;
printf("Device struct[%d]: (%d, %f)\n", i, dFooBars[i].foo, dFooBars[i].bar);
}
void runArrayOfStructs()
{
FooBar *hFooBarValues = new FooBar[FooBarsCount];
for (int i = 0; i < FooBarsCount; i++)
{
hFooBarValues[i].foo = 42 + i;
hFooBarValues[i].bar = 0.42f + i;
}
checkCudaErrors(hipMemcpyToSymbol(static_cast<const void*>(dFooBars), static_cast<const void*>(hFooBarValues), FooBarsCount * sizeof(FooBar)));
kernelArrayOfStructs << <1, FooBarsCount >> > ();
FooBar *hFooBarValues2 = new FooBar[FooBarsCount];
checkCudaErrors(hipMemcpyFromSymbol(static_cast<void*>(hFooBarValues2), static_cast<const void*>(dFooBars), FooBarsCount * sizeof(FooBar)));
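// A small host-side check, mirroring the printouts in runScalar and runStruct (the
// arrays used are the ones already in scope; freeing them afterwards is optional tidy-up):
for (int i = 0; i < FooBarsCount; i++)
{
printf("Struct[%d]: (%d, %f) -> (%d, %f)\n", i, hFooBarValues[i].foo, hFooBarValues[i].bar, hFooBarValues2[i].foo, hFooBarValues2[i].bar);
}
delete[] hFooBarValues;
delete[] hFooBarValues2;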
}
void run()
{
runScalar();
runStruct();
runArrayOfStructs();
}
} | 5345cb41edf77986ff67436e67f6b30fce8613db.cu | #include <cudaDefs.h>
#include <time.h>
#include <math.h>
#include <random>
namespace lesson4 {
using namespace std;
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
struct FooBar {
int foo;
float bar;
};
__constant__ __device__ int dScalar;
__constant__ __device__ FooBar dFooBar;
const int FooBarsCount = 5;
__constant__ __device__ FooBar dFooBars[FooBarsCount];
__global__ void kernelScalar()
{
printf("Device scalar: %d\n", dScalar);
}
void runScalar()
{
int hScalarValue = 5;
checkCudaErrors(cudaMemcpyToSymbol(static_cast<const void*>(&dScalar), static_cast<const void*>(&hScalarValue), 1 * sizeof(int)));
kernelScalar << <1, 1 >> > ();
int hScalarValue2 = -1;
checkCudaErrors(cudaMemcpyFromSymbol(static_cast<void*>(&hScalarValue2), static_cast<const void*>(&dScalar), 1 * sizeof(int)));
printf("Scalar: %d -> %d\n", hScalarValue, hScalarValue2);
}
__global__ void kernelStruct()
{
printf("Device struct: (%d, %f)\n", dFooBar.foo, dFooBar.bar);
}
void runStruct()
{
FooBar hFooBarValue{ 42, 0.42f };
checkCudaErrors(cudaMemcpyToSymbol(static_cast<const void*>(&dFooBar), static_cast<const void*>(&hFooBarValue), 1 * sizeof(FooBar)));
kernelStruct << <1, 1 >> > ();
FooBar hFooBarValue2;
checkCudaErrors(cudaMemcpyFromSymbol(static_cast<void*>(&hFooBarValue2), static_cast<const void*>(&dFooBar), 1 * sizeof(FooBar)));
printf("Struct (%d, %f) -> (%d, %f)\n", hFooBarValue.foo, hFooBarValue.bar, hFooBarValue2.foo, hFooBarValue2.bar);
}
__global__ void kernelArrayOfStructs()
{
int i = threadIdx.x;
printf("Device struct[%d]: (%d, %f)\n", i, dFooBars[i].foo, dFooBars[i].bar);
}
void runArrayOfStructs()
{
FooBar *hFooBarValues = new FooBar[FooBarsCount];
for (int i = 0; i < FooBarsCount; i++)
{
hFooBarValues[i].foo = 42 + i;
hFooBarValues[i].bar = 0.42f + i;
}
checkCudaErrors(cudaMemcpyToSymbol(static_cast<const void*>(dFooBars), static_cast<const void*>(hFooBarValues), FooBarsCount * sizeof(FooBar)));
kernelArrayOfStructs << <1, FooBarsCount >> > ();
FooBar *hFooBarValues2 = new FooBar[FooBarsCount];
checkCudaErrors(cudaMemcpyFromSymbol(static_cast<void*>(hFooBarValues2), static_cast<const void*>(dFooBars), FooBarsCount * sizeof(FooBar)));
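// A small host-side check, mirroring the printouts in runScalar and runStruct (the
// arrays used are the ones already in scope; freeing them afterwards is optional tidy-up):
for (int i = 0; i < FooBarsCount; i++)
{
printf("Struct[%d]: (%d, %f) -> (%d, %f)\n", i, hFooBarValues[i].foo, hFooBarValues[i].bar, hFooBarValues2[i].foo, hFooBarValues2[i].bar);
}
delete[] hFooBarValues;
delete[] hFooBarValues2;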
}
void run()
{
runScalar();
runStruct();
runArrayOfStructs();
}
} |
47c48846b824f29ac7a8fa3b1807b905a077ac79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nll_loss_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/nll_loss.h"
namespace phi {
template <typename T, typename Context>
void NllLossRawKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
const paddle::optional<DenseTensor>& weight,
int64_t ignore_index,
const std::string& reduction,
DenseTensor* out,
DenseTensor* total_weight) {
auto* x = &input;
auto x_data = x->data<T>();
auto out_data = dev_ctx.template Alloc<T>(out);
auto total_weight_data = dev_ctx.template Alloc<T>(total_weight);
auto label_data = label.data<int64_t>();
auto weight_data = weight.get_ptr() ? weight.get_ptr()->data<T>() : nullptr;
#ifdef PADDLE_WITH_HIP
hipMemset(total_weight_data, 0, sizeof(T));
#else
hipMemset(total_weight_data, 0, sizeof(T));
#endif
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
int64_t size_average = (int64_t)(reduction == "mean");
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossForward1D_no_reduce<T>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
ignore_index);
} else {
hipLaunchKernelGGL(( GPUNLLLossForward1D_with_reduce<T>)
, dim3(1), dim3(NTHREADS), 0, dev_ctx.stream(), out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
size_average,
ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
if (reduction == "none") {
hipLaunchKernelGGL(( GPUNLLLossForward2D_no_reduce<T>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
in_dim2,
in_dim3,
ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
hipLaunchKernelGGL(( GPUNLLLossForward2D_with_reduce<T>)
, dim3(total_blocks), dim3(threads), 0, dev_ctx.stream(), out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
map_size,
blocks_per_sample,
ignore_index);
if (size_average) {
hipLaunchKernelGGL(( GPUNLLLossForward2D_size_average<T>)
, dim3(1), dim3(1), 0, dev_ctx.stream(), out_data, total_weight_data);
}
}
}
}
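// Note: the forward kernels dispatched above implement the usual negative log-likelihood
// loss; roughly, for a sample with target class y, loss = -weight[y] * input[y]
// (with ignore_index skipped), and "mean" reduction divides the summed loss by the
// accumulated total_weight.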
} // namespace phi
PD_REGISTER_KERNEL(
nll_loss, GPU, ALL_LAYOUT, phi::NllLossRawKernel, float, double) {}
| 47c48846b824f29ac7a8fa3b1807b905a077ac79.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nll_loss_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpu/nll_loss.h"
namespace phi {
template <typename T, typename Context>
void NllLossRawKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
const paddle::optional<DenseTensor>& weight,
int64_t ignore_index,
const std::string& reduction,
DenseTensor* out,
DenseTensor* total_weight) {
auto* x = &input;
auto x_data = x->data<T>();
auto out_data = dev_ctx.template Alloc<T>(out);
auto total_weight_data = dev_ctx.template Alloc<T>(total_weight);
auto label_data = label.data<int64_t>();
auto weight_data = weight.get_ptr() ? weight.get_ptr()->data<T>() : nullptr;
#ifdef PADDLE_WITH_HIP
hipMemset(total_weight_data, 0, sizeof(T));
#else
cudaMemset(total_weight_data, 0, sizeof(T));
#endif
auto x_dims = x->dims();
auto batch_size = x_dims[0];
auto n_classes = x_dims[1];
int64_t size_average = (int64_t)(reduction == "mean");
if (x_dims.size() == 2) {
int blocks = NumBlocks(batch_size);
int threads = kNumCUDAThreads;
if (reduction == "none") {
GPUNLLLossForward1D_no_reduce<T>
<<<blocks, threads, 0, dev_ctx.stream()>>>(out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
ignore_index);
} else {
GPUNLLLossForward1D_with_reduce<T>
<<<1, NTHREADS, 0, dev_ctx.stream()>>>(out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
size_average,
ignore_index);
}
} else if (x_dims.size() == 4) {
const auto in_dim2 = x_dims[2];
const auto in_dim3 = x_dims[3];
const auto map_size = in_dim2 * in_dim3;
const auto out_numel = batch_size * in_dim2 * in_dim3;
int blocks = NumBlocks(out_numel);
int threads = kNumCUDAThreads;
if (reduction == "none") {
GPUNLLLossForward2D_no_reduce<T>
<<<blocks, threads, 0, dev_ctx.stream()>>>(out_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
in_dim2,
in_dim3,
ignore_index);
} else {
int blocks_per_sample = NumBlocks(map_size) / 128;
blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample;
int total_blocks = blocks_per_sample * batch_size;
GPUNLLLossForward2D_with_reduce<T>
<<<total_blocks, threads, 0, dev_ctx.stream()>>>(out_data,
total_weight_data,
x_data,
label_data,
weight_data,
batch_size,
n_classes,
map_size,
blocks_per_sample,
ignore_index);
if (size_average) {
GPUNLLLossForward2D_size_average<T>
<<<1, 1, 0, dev_ctx.stream()>>>(out_data, total_weight_data);
}
}
}
}
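// Note: the forward kernels dispatched above implement the usual negative log-likelihood
// loss; roughly, for a sample with target class y, loss = -weight[y] * input[y]
// (with ignore_index skipped), and "mean" reduction divides the summed loss by the
// accumulated total_weight.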
} // namespace phi
PD_REGISTER_KERNEL(
nll_loss, GPU, ALL_LAYOUT, phi::NllLossRawKernel, float, double) {}
|
717adf36bb50dc7bdd7cb1d10c6f58819fe19432.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<int64_t, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, data), value, size, maxnorm);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(hipGetLastError());
return val;
}
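/* Note: the routines below compute p-norms, ||x||_p = (sum_i |x_i|^p)^(1/p), reduced
along `dimension` (norm) or over the whole tensor (normall). p == 0 is treated as a
count of non-zero elements, p == 1 as a plain sum of absolute values, p == 2 with a
final square root, and any other p with a final pow(result, 1/p). */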
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMin<real>(),
ReduceMin<real>(),
THCNumerics<real>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMax<real>(),
ReduceMax<real>(),
THCNumerics<real>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
#endif
| 717adf36bb50dc7bdd7cb1d10c6f58819fe19432.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor_(sum)(state, self, src, dim, keepdim);
THCTensor_(div)(state, self, self, ScalarConvert<int64_t, real>::to(THCTensor_(size)(state, src, dim)));
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, ScalarConvert<int, real>::to(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCTensor_kernel_renorm<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, data), value, size, maxnorm);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal, real>(mean),
ReduceAdd<accreal, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
ScalarConvert<ptrdiff_t, accreal>::to(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(cudaGetLastError());
return val;
}
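/* Note: the routines below compute p-norms, ||x||_p = (sum_i |x_i|^p)^(1/p), reduced
along `dimension` (norm) or over the whole tensor (normall). p == 0 is treated as a
count of non-zero elements, p == 1 as a plain sum of absolute values, p == 2 with a
final square root, and any other p with a final pow(result, 1/p). */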
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<real>(), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceDim(state, self, src,
TensorNormOp<real, 2>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, ScalarConvert<float, real>::to(0.5));
} else {
THC_reduceDim(state, self, src,
TensorNormOp<real, -1>(value), ReduceAdd<real, accreal>(), ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0), dimension, keepdim);
THCTensor_(pow)(state, self, self, THCNumerics<real>::cinv(value));
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(0.0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(1.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
} else if (THCNumerics<real>::eq(value, ScalarConvert<float, real>::to(2.0))) {
THC_reduceAll(state, self,
TensorNormOp<real, 2>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else {
THC_reduceAll(state, self,
TensorNormOp<real, -1>(value),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<float, accreal>::to(0.0f),
&result, 0);
result = THCNumerics<accreal>::pow(
result,
ScalarConvert<real, accreal>::to(THCNumerics<real>::cinv(value))
);
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, ScalarConvert<int, accreal>::to(0),
thrust::plus<accreal>(),
TensorDistOp<accreal, real>(ScalarConvert<real, accreal>::to(value)));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, 1.0 / ScalarConvert<real, accreal>::to(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceAdd<real, accreal>(),
ReduceAdd<accreal, accreal>(),
ScalarConvert<int, accreal>::to(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMultiply<real, accreal>(),
ReduceMultiply<accreal, accreal>(),
ScalarConvert<int, accreal>::to(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMin<real>(),
ReduceMin<real>(),
THCNumerics<real>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
if (!THC_reduceAll(state, self,
thrust::identity<real>(),
ReduceMax<real>(),
ReduceMax<real>(),
THCNumerics<real>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
#endif
|
da306f0cd73ee7b7aa86e0490f0949cfc3b05023.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include "constants.c"
#include "utils.h"
/*
* Mutation kernel
*/
__global__ void mutation(int* population_d, float* population_cost_d, float* population_fitness_d, hiprandState_t* states_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
hiprandState_t localState = states_d[tid];
// Only mutate by random chance
if (hiprand_uniform(&localState) < mutation_ratio) {
// Don't mutate the first city in the route.
// Draw two indices in [1, num_cities-1]: the leading 1 skips the first city and the
// -1.00001 keeps the truncated result below num_cities
int random_num1 = 1 + hiprand_uniform(&localState) * (num_cities - 1.00001);
int random_num2 = 1 + hiprand_uniform(&localState) * (num_cities - 1.00001);
int city_temp = population_d[tid*num_cities + random_num1];
population_d[tid*num_cities + random_num1] = population_d[tid*num_cities + random_num2];
population_d[tid*num_cities + random_num2] = city_temp;
states_d[tid] = localState;
}
}
/*
* Fitness kernel: Evaluates population fitness
*/
__global__ void getPopulationFitness(int* population_d, float* population_cost_d, float* population_fitness_d, float* citymap_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
// Calculates cost and fitness of the route
evaluateRoute(population_d, population_cost_d, population_fitness_d, citymap_d, tid);
}
/*
* Crossover kernel: Perform merging of parents
*/
__global__ void crossover(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, hiprandState_t* states_d, float* citymap_d, int index) {
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
// For ease of implementation, each thread copies the parent rows it needs into per-thread local arrays
population_d[tid*num_cities] = parent_cities_d[tid* (2*num_cities)];
int parent_city_ptr[num_cities];
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2 + i];
int tourarray[num_cities];
for(int i=0; i<num_cities;i++)
tourarray[i] = population_d[tid*num_cities + i];
int current_city_id = population_d[tid*num_cities + index - 1];
// Choose next valid city based on the last one in the route from each parent
int c1 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2+num_cities + i];
int c2 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
// Keep the better choice from both the parents by checking the one that is closer
if(citymap_d[c1*num_cities + current_city_id] <= citymap_d[c2*num_cities + current_city_id])
population_d[tid*num_cities + index] = c1;
else
population_d[tid*num_cities + index] = c2;
}
/*
* Tournament Selection kernel
* Subroutine of Selection kernel
* Subsamples a tournament from the existing population and chooses the best
* candidate route based on fitness
*/
__device__ int* tournamentSelection(int* population_d, float* population_cost_d,
float* population_fitness_d, hiprandState_t* states_d, int tid) {
int tournament[tournament_size*num_cities];
float tournament_fitness[tournament_size];
float tournament_cost[tournament_size];
int random_num;
for (int i = 0; i < tournament_size; i++) {
// gets random number from global random state on GPU
random_num = hiprand_uniform(&states_d[tid]) * (ISLANDS - 1);
for(int c=0; c<num_cities; c++) {
tournament[i*num_cities + c] = population_d[random_num*num_cities + c];
tournament_cost[i] = population_cost_d[random_num];
tournament_fitness[i] = population_fitness_d[random_num];
}
}
int fittest = getFittestTourIndex(tournament, tournament_cost, tournament_fitness);
int fittest_route[num_cities];
for(int c=0; c<num_cities; c++) {
fittest_route[c] = tournament[fittest*num_cities + c];
}
return fittest_route;
}
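/* Note: `return fittest_route;` above returns the address of a function-local array,
which is undefined behaviour once the function returns (compilers typically warn about
this). A minimal alternative sketch (the name tournamentSelectionInto is illustrative,
not part of the original) writes the winning route into a caller-provided buffer; the
selection kernel would then declare `int parent1[num_cities];` and pass it in. */
__device__ void tournamentSelectionInto(int* population_d, float* population_cost_d,
float* population_fitness_d, hiprandState_t* states_d, int tid, int* out_route) {
int tournament[tournament_size*num_cities];
float tournament_fitness[tournament_size];
float tournament_cost[tournament_size];
for (int i = 0; i < tournament_size; i++) {
// Same tournament sampling as above
int random_num = hiprand_uniform(&states_d[tid]) * (ISLANDS - 1);
for(int c=0; c<num_cities; c++) {
tournament[i*num_cities + c] = population_d[random_num*num_cities + c];
tournament_cost[i] = population_cost_d[random_num];
tournament_fitness[i] = population_fitness_d[random_num];
}
}
int fittest = getFittestTourIndex(tournament, tournament_cost, tournament_fitness);
// Copy the winner into the caller's buffer instead of returning a pointer to a local
for(int c=0; c<num_cities; c++)
out_route[c] = tournament[fittest*num_cities + c];
}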
/*
* Selection kernel: Chooses 2 parents through tournament selection
* and stores them in the parent array in global memory
*/
__global__ void selection(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, hiprandState_t* states_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
int* parent1;
/*
if(ELITISM && (blockIdx.x == 0)) {
int fittest = getFittestTourIndex(population_d, population_cost_d, population_fitness_d);
for(int c=0; c<num_cities; c++) {
parent_cities_d[tid* (2*num_cities) +c] = population_d[fittest*num_cities + c];
parent_cities_d[tid* (2*num_cities) +num_cities +c] = population_d[fittest*num_cities + c];
}
} else {
*/
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_d, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +c] = parent1[c];
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_d, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +num_cities +c] = parent1[c];
//}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(hiprandState_t* states) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
hiprand_init(1337, tid, 0, &states[tid]);
}
/*
* Main Function
* Declare relevant variables on the host
* Initialize random tours and the adjacency matrix
*/
int main() {
hipSetDevice(0);
hipError_t err = hipSuccess;
int max_val = 250;
//initialising 1D array of cost of ith city to jth city called citymap
float citymap[num_cities*num_cities];
int* population = (int*)calloc(ISLANDS*num_cities, sizeof(int));
float* population_fitness = (float*)calloc(ISLANDS, sizeof(float));
float* population_cost = (float*)calloc(ISLANDS, sizeof(float));
printf("Num islands: %d\n", ISLANDS);
printf("Population size: %d\n", ISLANDS*num_cities);
//building cost table (citymap)
for(int i=0; i<num_cities; i++) {
for(int j=0; j<num_cities; j++) {
if(i!=j) {
citymap[i*num_cities+j] = L2distance(city_x[i], city_y[i], city_x[j], city_y[j]);
} else {
citymap[i*num_cities+j] = max_val * max_val;
}
}
}
initalizeRandomPopulation(population, population_cost, population_fitness, citymap);
int fittest = getFittestScore(population_fitness);
printf("min distance: %f\n", population_cost[fittest]);
// Device Variables
int* population_d;
float* population_fitness_d;
float* population_cost_d;
int* parent_cities_d;
float* citymap_d;
hiprandState_t *states_d;
float milliseconds;
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start);
hipMalloc((void **)&population_d, ISLANDS*num_cities*sizeof(int));
hipMalloc((void **)&population_cost_d, ISLANDS*sizeof(float));
hipMalloc((void **)&population_fitness_d, ISLANDS*sizeof(float));
hipMalloc((void **)&parent_cities_d, 2*ISLANDS*num_cities*sizeof(int));
hipMalloc((void **)&citymap_d, num_cities*num_cities*sizeof(float));
hipMalloc((void **)&states_d, ISLANDS*sizeof(hiprandState_t));
hipMemcpy(population_d, population, ISLANDS*num_cities*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(population_cost_d, population_cost, ISLANDS*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(population_fitness_d, population_fitness, ISLANDS*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(citymap_d, citymap, num_cities*num_cities*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( init), dim3(num_blocks), dim3(num_threads), 0, 0, states_d);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Init Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
// Get initial fitness of population
hipLaunchKernelGGL(( getPopulationFitness), dim3(num_blocks), dim3(num_threads), 0, 0,
population_d, population_cost_d, population_fitness_d, citymap_d);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Fitness Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
for(int i = 0; i < num_generations; i++ ) {
hipLaunchKernelGGL(( selection), dim3(num_blocks), dim3(num_threads), 0, 0,
population_d, population_cost_d, population_fitness_d, parent_cities_d, states_d);
//hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Selection Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
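// Note: each crossover launch fills exactly one position (index j) of every island's
// child tour, keeping whichever parent-proposed next city is closer to the city placed
// in the previous step, so this loop rebuilds the tours one city at a time.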
for (int j = 1; j < num_cities; j++){
hipLaunchKernelGGL(( crossover), dim3(num_blocks), dim3(num_threads), 0, 0, population_d, population_cost_d, population_fitness_d, parent_cities_d, states_d, citymap_d, j);
//printf("%d", j);
//hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Crossover Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
}
hipLaunchKernelGGL(( mutation), dim3(num_blocks), dim3(num_threads), 0, 0,
population_d, population_cost_d, population_fitness_d, states_d);
//hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Mutation Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
hipLaunchKernelGGL(( getPopulationFitness), dim3(num_blocks), dim3(num_threads), 0, 0,
population_d, population_cost_d, population_fitness_d, citymap_d);
//hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Fitness Kernel: %s\n", hipGetErrorString(err));
exit(0);
}
// Print things for sanity check
if(i > 0 && i % print_interval == 0) {
hipMemcpy(population_fitness, population_fitness_d, ISLANDS*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(population_cost, population_cost_d, ISLANDS*sizeof(float), hipMemcpyDeviceToHost);
fittest = getFittestScore(population_fitness);
printf("Iteration:%d, min distance: %f\n", i, population_cost[fittest]);
}
}
hipEventRecord (stop);
hipEventSynchronize (stop);
hipEventElapsedTime (&milliseconds, start, stop);
hipMemcpy(population, population_d, ISLANDS*num_cities*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(population_fitness, population_fitness_d, ISLANDS*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(population_cost, population_cost_d, ISLANDS*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
fittest = getFittestScore(population_fitness);
printf("time: %f, min distance: %f\n", milliseconds/1000, population_cost[fittest]);
hipFree(population_d);
hipFree(population_fitness_d);
hipFree(population_cost_d);
hipFree(parent_cities_d);
hipFree(citymap_d);
hipFree(states_d);
return 0;
}
| da306f0cd73ee7b7aa86e0490f0949cfc3b05023.cu | #include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include "constants.c"
#include "utils.h"
/*
* Mutation kernel
*/
__global__ void mutation(int* population_d, float* population_cost_d, float* population_fitness_d, curandState* states_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
curandState localState = states_d[tid];
// Only mutate by random chance
if (curand_uniform(&localState) < mutation_ratio) {
// Don't mutate the first city in the route.
// Draw two indices in [1, num_cities-1]: the leading 1 skips the first city and the
// -1.00001 keeps the truncated result below num_cities
int random_num1 = 1 + curand_uniform(&localState) * (num_cities - 1.00001);
int random_num2 = 1 + curand_uniform(&localState) * (num_cities - 1.00001);
int city_temp = population_d[tid*num_cities + random_num1];
population_d[tid*num_cities + random_num1] = population_d[tid*num_cities + random_num2];
population_d[tid*num_cities + random_num2] = city_temp;
states_d[tid] = localState;
}
}
/*
* Fitness kernel: Evaluates population fitness
*/
__global__ void getPopulationFitness(int* population_d, float* population_cost_d, float* population_fitness_d, float* citymap_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
// Calcuates cost and fitness of the route
evaluateRoute(population_d, population_cost_d, population_fitness_d, citymap_d, tid);
}
/*
* Crossover kernel: Perform merging of parents
*/
__global__ void crossover(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, curandState* states_d, float* citymap_d, int index) {
// Get thread (particle) ID
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
// For ease of implementation, each thread copies the parent rows it needs into per-thread local arrays
population_d[tid*num_cities] = parent_cities_d[tid* (2*num_cities)];
int parent_city_ptr[num_cities];
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2 + i];
int tourarray[num_cities];
for(int i=0; i<num_cities;i++)
tourarray[i] = population_d[tid*num_cities + i];
int current_city_id = population_d[tid*num_cities + index - 1];
// Choose next valid city based on the last one in the route from each parent
int c1 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
for(int i=0; i<num_cities;i++)
parent_city_ptr[i] = parent_cities_d[tid*num_cities*2+num_cities + i];
int c2 = getValidNextCity(parent_city_ptr, tourarray, current_city_id, index);
// Keep the better choice from both the parents by checking the one that is closer
if(citymap_d[c1*num_cities + current_city_id] <= citymap_d[c2*num_cities + current_city_id])
population_d[tid*num_cities + index] = c1;
else
population_d[tid*num_cities + index] = c2;
}
/*
* Tournament Selection kernel
* Subroutine of Selection kernel
* Subsamples a tournament from the existing population and chooses the best
* candidate route based on fitness
*/
__device__ int* tournamentSelection(int* population_d, float* population_cost_d,
float* population_fitness_d, curandState* states_d, int tid) {
int tournament[tournament_size*num_cities];
float tournament_fitness[tournament_size];
float tournament_cost[tournament_size];
int random_num;
for (int i = 0; i < tournament_size; i++) {
// gets random number from global random state on GPU
random_num = curand_uniform(&states_d[tid]) * (ISLANDS - 1);
for(int c=0; c<num_cities; c++) {
tournament[i*num_cities + c] = population_d[random_num*num_cities + c];
tournament_cost[i] = population_cost_d[random_num];
tournament_fitness[i] = population_fitness_d[random_num];
}
}
int fittest = getFittestTourIndex(tournament, tournament_cost, tournament_fitness);
int fittest_route[num_cities];
for(int c=0; c<num_cities; c++) {
fittest_route[c] = tournament[fittest*num_cities + c];
}
return fittest_route;
}
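/* Note: as written, `return fittest_route;` returns the address of a function-local
array, which is undefined behaviour once the function returns; writing the winning
route into a caller-provided buffer (an extra int* parameter) avoids the dangling
pointer. */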
/*
* Selection kernel: Chooses 2 parents through tournament selection
* and stores them in the parent array in global memory
*/
__global__ void selection(int* population_d, float* population_cost_d,
float* population_fitness_d, int* parent_cities_d, curandState* states_d) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
int* parent1;
/*
if(ELITISM && (blockIdx.x == 0)) {
int fittest = getFittestTourIndex(population_d, population_cost_d, population_fitness_d);
for(int c=0; c<num_cities; c++) {
parent_cities_d[tid* (2*num_cities) +c] = population_d[fittest*num_cities + c];
parent_cities_d[tid* (2*num_cities) +num_cities +c] = population_d[fittest*num_cities + c];
}
} else {
*/
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_d, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +c] = parent1[c];
parent1 = tournamentSelection(population_d, population_cost_d,
population_fitness_d, states_d, tid);
for(int c=0; c<num_cities; c++)
parent_cities_d[tid* (2*num_cities) +num_cities +c] = parent1[c];
//}
}
/* this GPU kernel function is used to initialize the random states */
__global__ void init(curandState_t* states) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if(tid >= ISLANDS) return;
curand_init(1337, tid, 0, &states[tid]);
}
/*
* Main Function
* Declare relevant variables on the host
* Initialize random tours and the adjacency matrix
*/
int main() {
cudaSetDevice(0);
cudaError_t err = cudaSuccess;
int max_val = 250;
//initialising 1D array of cost of ith city to jth city called citymap
float citymap[num_cities*num_cities];
int* population = (int*)calloc(ISLANDS*num_cities, sizeof(int));
float* population_fitness = (float*)calloc(ISLANDS, sizeof(float));
float* population_cost = (float*)calloc(ISLANDS, sizeof(float));
printf("Num islands: %d\n", ISLANDS);
printf("Population size: %d\n", ISLANDS*num_cities);
//building cost table (citymap)
for(int i=0; i<num_cities; i++) {
for(int j=0; j<num_cities; j++) {
if(i!=j) {
citymap[i*num_cities+j] = L2distance(city_x[i], city_y[i], city_x[j], city_y[j]);
} else {
citymap[i*num_cities+j] = max_val * max_val;
}
}
}
initalizeRandomPopulation(population, population_cost, population_fitness, citymap);
int fittest = getFittestScore(population_fitness);
printf("min distance: %f\n", population_cost[fittest]);
// Device Variables
int* population_d;
float* population_fitness_d;
float* population_cost_d;
int* parent_cities_d;
float* citymap_d;
curandState *states_d;
float milliseconds;
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start);
cudaMalloc((void **)&population_d, ISLANDS*num_cities*sizeof(int));
cudaMalloc((void **)&population_cost_d, ISLANDS*sizeof(float));
cudaMalloc((void **)&population_fitness_d, ISLANDS*sizeof(float));
cudaMalloc((void **)&parent_cities_d, 2*ISLANDS*num_cities*sizeof(int));
cudaMalloc((void **)&citymap_d, num_cities*num_cities*sizeof(float));
cudaMalloc((void **)&states_d, ISLANDS*sizeof(curandState));
cudaMemcpy(population_d, population, ISLANDS*num_cities*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(population_cost_d, population_cost, ISLANDS*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(population_fitness_d, population_fitness, ISLANDS*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(citymap_d, citymap, num_cities*num_cities*sizeof(float), cudaMemcpyHostToDevice);
init<<<num_blocks, num_threads>>>(states_d);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Init Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
// Get initial fitness of population
getPopulationFitness<<<num_blocks, num_threads>>>(
population_d, population_cost_d, population_fitness_d, citymap_d);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Fitness Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
for(int i = 0; i < num_generations; i++ ) {
selection<<<num_blocks, num_threads>>>(
population_d, population_cost_d, population_fitness_d, parent_cities_d, states_d);
//cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Selection Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
for (int j = 1; j < num_cities; j++){
crossover<<<num_blocks, num_threads>>>(population_d, population_cost_d, population_fitness_d, parent_cities_d, states_d, citymap_d, j);
//printf("%d", j);
//cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Crossover Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
}
mutation<<<num_blocks, num_threads>>>(
population_d, population_cost_d, population_fitness_d, states_d);
//cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Mutation Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
getPopulationFitness<<<num_blocks, num_threads>>>(
population_d, population_cost_d, population_fitness_d, citymap_d);
//cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Mutation Kernel: %s\n", cudaGetErrorString(err));
exit(0);
}
// Print things for sanity check
if(i > 0 && i % print_interval == 0) {
cudaMemcpy(population_fitness, population_fitness_d, ISLANDS*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(population_cost, population_cost_d, ISLANDS*sizeof(float), cudaMemcpyDeviceToHost);
fittest = getFittestScore(population_fitness);
printf("Iteration:%d, min distance: %f\n", i, population_cost[fittest]);
}
}
cudaEventRecord (stop);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&milliseconds, start, stop);
cudaMemcpy(population, population_d, ISLANDS*num_cities*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(population_fitness, population_fitness_d, ISLANDS*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(population_cost, population_cost_d, ISLANDS*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
fittest = getFittestScore(population_fitness);
printf("time: %f, min distance: %f\n", milliseconds/1000, population_cost[fittest]);
cudaFree(population_d);
cudaFree(population_fitness_d);
cudaFree(population_cost_d);
cudaFree(parent_cities_d);
cudaFree(citymap_d);
cudaFree(states_d);
return 0;
}
|
c6d3531314308752bb02dada9764211b54b135c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
#include </home/ksugumar/project/headers/helper_functions.h>
#include </home/ksugumar/project/headers/helper_cuda.h>
#include "device_launch_parameters.h"
#include <chrono>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "/home/ksugumar/project/headers/stb_image.h"
#include "/home/ksugumar/project/headers/stb_image_write.h"
using namespace std;
using namespace chrono;
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n", hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Kernel function for image filtering using convolution
__global__ void imageFilteringkernel
(const float *d_image_pad, const unsigned int r_pad, const unsigned int c_pad, const float *d_filter,
const int padding_size, float *d_conv_image, const unsigned int rows, const unsigned int cols)
{
unsigned int filter_size = 2 * padding_size + 1;
//const int S = padding_size;
//Determine the pixel co-ordinates
const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + padding_size;
const unsigned int i = blockIdx.y * blockDim.y + threadIdx.y + padding_size;
//Multiply and add operation for pixel (j,i)
if (j > padding_size && j < c_pad - padding_size && i > padding_size && i < r_pad - padding_size)
{
unsigned int conv_pixel_pos = (i - padding_size) * cols + (j - padding_size);
d_conv_image[conv_pixel_pos] = 0.0;
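		// Accumulate the weighted sum over the (2*padding_size+1) x (2*padding_size+1)
		// neighbourhood; reading from the padded image avoids bounds checks inside the loops.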
for (int k = -padding_size; k <= padding_size; k++)
{
for (int l = -padding_size; l <= padding_size; l++)
{
unsigned int img_pixel_pos = (i + k) * c_pad + (j + l);
unsigned int filter_pos = (k + padding_size) * filter_size + (l + padding_size);
d_conv_image[conv_pixel_pos] += d_image_pad[img_pixel_pos] * d_filter[filter_pos];
}
}
}
}
inline unsigned int iDivUp(const unsigned int &a, const unsigned int &b) { return (a%b != 0) ? (a / b + 1) : (a / b); }
int main(int argc, char** argv)
{
// Read the image file on host
int rows, cols, bpp;
uint8_t* h_original_image = stbi_load(argv[1], &cols, &rows, &bpp, 1);
cout << "The number of rows is " << rows << "\n";
cout << "The number of columns is " << cols << "\n";
// Declare Image variables
int padding_size = 1;
unsigned int r_pad = rows + 2 * padding_size;
unsigned int c_pad = cols + 2 * padding_size;
int imsize = rows*cols;
int imsize_pad = r_pad*c_pad;
//Allocate space on host for padded input image
float **h_padded_image;
h_padded_image = new float*[r_pad];
for (int i = 0; i < r_pad; i++)
{
h_padded_image[i] = new float[c_pad];
}
// Fill the 2D array with zeros
for (int i = 0; i < r_pad; i++)
{
for (int j = 0; j < c_pad; j++)
{
h_padded_image[i][j] = 0;
}
}
	// Copy pixels from the original image into the 2D array, leaving the zero padding untouched
for (int i = padding_size; i < r_pad - padding_size; i++)
{
for (int j = padding_size; j < c_pad - padding_size; j++)
{
h_padded_image[i][j] = *(h_original_image + ((i - padding_size)*cols) + (j - padding_size));
}
}
	// Convert the padded image to a 1D array. Accessing 1D arrays is more efficient on GPUs
float *h_padded_image_1d = new float[imsize_pad];
for (int q = 0; q < r_pad; q++)
{
for (int t = 0; t < c_pad; t++)
{
			h_padded_image_1d[q * c_pad + t] = h_padded_image[q][t];
}
}
	// delete the original 2D padded image after reshaping it to 1D
	for (int i = 0; i < r_pad; i++) delete[] h_padded_image[i];
	delete[] h_padded_image;
// Initialize the kernel to be used for convolution as a 1D array
float h_filter[9] = { -1, -1, -1, \
- 1, 8, -1, \
- 1, -1, -1 };
//float h_filter[9] = {0, 0, 0, \
// 0, 1, 0, \
// 0, 0, 0 };
// Initialize a 1D array to hold the convoluted image
float *h_conv_image_1d = new float[imsize];
unsigned int filter_size = 2 * padding_size + 1;
// MEMORY ALLOCATION ON DEVICE STARTS HERE
//Allocate memory on device for image and transfer image from host to device
float *d_padded_image;
unsigned int d_imsize_pad = r_pad * c_pad * sizeof(float);
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_padded_image), d_imsize_pad));
auto h_start = steady_clock::now();
checkCudaErrors(hipMemcpy(d_padded_image, h_padded_image_1d, d_imsize_pad, hipMemcpyHostToDevice));
//Allocate memory on device for filter and transfer filter from host to device
float *d_filter;
unsigned int d_filtersize = filter_size * filter_size * sizeof(float);
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_filter), d_filtersize));
checkCudaErrors(hipMemcpy(d_filter, h_filter, d_filtersize, hipMemcpyHostToDevice));
//Set up the grid and block dimensions for execution
const unsigned int block_col = 32;
const unsigned int block_row = 32;
const dim3 grid( iDivUp(cols, block_col), iDivUp(rows, block_row));
const dim3 threadBlock(block_col, block_row);
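	//One thread is mapped to each output pixel; iDivUp rounds the grid size up so that
	//partially filled edge blocks are still launched.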
//Memory allocation for filtered image
float *d_conv_image;
unsigned int conv_imsize = rows * cols * sizeof(float);
checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&d_conv_image), conv_imsize));
// **** CONVOLUTION STARTS HERE ! ****
float elapsed = 0;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( imageFilteringkernel) , dim3(grid), dim3(threadBlock), 0, 0, d_padded_image, r_pad, c_pad, d_filter, padding_size, d_conv_image, rows, cols);
checkCudaErrors(hipDeviceSynchronize());
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
HANDLE_ERROR(hipEventElapsedTime(&elapsed, start, stop));
HANDLE_ERROR(hipEventDestroy(start));
HANDLE_ERROR(hipEventDestroy(stop));
cout << "Total Elapsed Time for the Kernel(GPU): " << elapsed << " ms" << endl;
checkCudaErrors(hipMemcpy(h_conv_image_1d, d_conv_image, conv_imsize, hipMemcpyDeviceToHost));
auto h_end = steady_clock::now();
cout << "Total Elapsed Time(including data transfer): " << (duration<double>\
(h_end - h_start).count())*1000.0 << " ms\n" << endl;
// **** CONVOLUTION ENDS HERE ! ****
static uint8_t conv_image_final[1024][1024];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
conv_image_final[i][j] = 0;
}
}
	// convert the 1D result back to 2D, clamping pixel values to the valid [0, 255] range
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
			int pixel = h_conv_image_1d[i*cols + j];
if (pixel > 255)
conv_image_final[i][j] = 255;
else if (pixel < 0)
conv_image_final[i][j] = 0;
else
conv_image_final[i][j] = pixel;
}
}
// Write convoluted image to file
stbi_write_jpg(argv[2], cols, rows, 1, conv_image_final, cols);
return 0;
}
| c6d3531314308752bb02dada9764211b54b135c4.cu | #include <iostream>
#include <string>
#include <cuda_runtime.h>
#include </home/ksugumar/project/headers/helper_functions.h>
#include </home/ksugumar/project/headers/helper_cuda.h>
#include "device_launch_parameters.h"
#include <chrono>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "/home/ksugumar/project/headers/stb_image.h"
#include "/home/ksugumar/project/headers/stb_image_write.h"
using namespace std;
using namespace chrono;
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//Kernel function for image filtering using convolution
__global__ void imageFilteringkernel
(const float *d_image_pad, const unsigned int r_pad, const unsigned int c_pad, const float *d_filter,
const int padding_size, float *d_conv_image, const unsigned int rows, const unsigned int cols)
{
unsigned int filter_size = 2 * padding_size + 1;
//const int S = padding_size;
//Determine the pixel co-ordinates
const unsigned int j = blockIdx.x * blockDim.x + threadIdx.x + padding_size;
const unsigned int i = blockIdx.y * blockDim.y + threadIdx.y + padding_size;
//Multiply and add operation for pixel (j,i)
if (j > padding_size && j < c_pad - padding_size && i > padding_size && i < r_pad - padding_size)
{
unsigned int conv_pixel_pos = (i - padding_size) * cols + (j - padding_size);
d_conv_image[conv_pixel_pos] = 0.0;
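		// Accumulate the weighted sum over the (2*padding_size+1) x (2*padding_size+1)
		// neighbourhood; reading from the padded image avoids bounds checks inside the loops.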
for (int k = -padding_size; k <= padding_size; k++)
{
for (int l = -padding_size; l <= padding_size; l++)
{
unsigned int img_pixel_pos = (i + k) * c_pad + (j + l);
unsigned int filter_pos = (k + padding_size) * filter_size + (l + padding_size);
d_conv_image[conv_pixel_pos] += d_image_pad[img_pixel_pos] * d_filter[filter_pos];
}
}
}
}
inline unsigned int iDivUp(const unsigned int &a, const unsigned int &b) { return (a%b != 0) ? (a / b + 1) : (a / b); }
int main(int argc, char** argv)
{
// Read the image file on host
int rows, cols, bpp;
uint8_t* h_original_image = stbi_load(argv[1], &cols, &rows, &bpp, 1);
cout << "The number of rows is " << rows << "\n";
cout << "The number of columns is " << cols << "\n";
// Declare Image variables
int padding_size = 1;
unsigned int r_pad = rows + 2 * padding_size;
unsigned int c_pad = cols + 2 * padding_size;
int imsize = rows*cols;
int imsize_pad = r_pad*c_pad;
//Allocate space on host for padded input image
float **h_padded_image;
h_padded_image = new float*[r_pad];
for (int i = 0; i < r_pad; i++)
{
h_padded_image[i] = new float[c_pad];
}
// Fill the 2D array with zeros
for (int i = 0; i < r_pad; i++)
{
for (int j = 0; j < c_pad; j++)
{
h_padded_image[i][j] = 0;
}
}
	// Copy pixels from the original image into the 2D array, leaving the zero padding untouched
for (int i = padding_size; i < r_pad - padding_size; i++)
{
for (int j = padding_size; j < c_pad - padding_size; j++)
{
h_padded_image[i][j] = *(h_original_image + ((i - padding_size)*cols) + (j - padding_size));
}
}
	// Convert the padded image to a 1D array. Accessing 1D arrays is more efficient on GPUs
float *h_padded_image_1d = new float[imsize_pad];
for (int q = 0; q < r_pad; q++)
{
for (int t = 0; t < c_pad; t++)
{
			h_padded_image_1d[q * c_pad + t] = h_padded_image[q][t];
}
}
	// delete the original 2D padded image after reshaping it to 1D
	for (int i = 0; i < r_pad; i++) delete[] h_padded_image[i];
	delete[] h_padded_image;
// Initialize the kernel to be used for convolution as a 1D array
float h_filter[9] = { -1, -1, -1, \
- 1, 8, -1, \
- 1, -1, -1 };
//float h_filter[9] = {0, 0, 0, \
// 0, 1, 0, \
// 0, 0, 0 };
// Initialize a 1D array to hold the convoluted image
float *h_conv_image_1d = new float[imsize];
unsigned int filter_size = 2 * padding_size + 1;
// MEMORY ALLOCATION ON DEVICE STARTS HERE
//Allocate memory on device for image and transfer image from host to device
float *d_padded_image;
unsigned int d_imsize_pad = r_pad * c_pad * sizeof(float);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_padded_image), d_imsize_pad));
auto h_start = steady_clock::now();
checkCudaErrors(cudaMemcpy(d_padded_image, h_padded_image_1d, d_imsize_pad, cudaMemcpyHostToDevice));
//Allocate memory on device for filter and transfer filter from host to device
float *d_filter;
unsigned int d_filtersize = filter_size * filter_size * sizeof(float);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_filter), d_filtersize));
checkCudaErrors(cudaMemcpy(d_filter, h_filter, d_filtersize, cudaMemcpyHostToDevice));
//Set up the grid and block dimensions for execution
const unsigned int block_col = 32;
const unsigned int block_row = 32;
const dim3 grid( iDivUp(cols, block_col), iDivUp(rows, block_row));
const dim3 threadBlock(block_col, block_row);
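	//One thread is mapped to each output pixel; iDivUp rounds the grid size up so that
	//partially filled edge blocks are still launched.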
//Memory allocation for filtered image
float *d_conv_image;
unsigned int conv_imsize = rows * cols * sizeof(float);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_conv_image), conv_imsize));
// **** CONVOLUTION STARTS HERE ! ****
float elapsed = 0;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
checkCudaErrors(cudaDeviceSynchronize());
imageFilteringkernel <<<grid, threadBlock>>>(d_padded_image, r_pad, c_pad, d_filter, padding_size, d_conv_image, rows, cols);
checkCudaErrors(cudaDeviceSynchronize());
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsed, start, stop));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
cout << "Total Elapsed Time for the Kernel(GPU): " << elapsed << " ms" << endl;
checkCudaErrors(cudaMemcpy(h_conv_image_1d, d_conv_image, conv_imsize, cudaMemcpyDeviceToHost));
auto h_end = steady_clock::now();
cout << "Total Elapsed Time(including data transfer): " << (duration<double>\
(h_end - h_start).count())*1000.0 << " ms\n" << endl;
// **** CONVOLUTION ENDS HERE ! ****
static uint8_t conv_image_final[1024][1024];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
conv_image_final[i][j] = 0;
}
}
	// convert the 1D result back to 2D, clamping pixel values to the valid [0, 255] range
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
			int pixel = h_conv_image_1d[i*cols + j];
if (pixel > 255)
conv_image_final[i][j] = 255;
else if (pixel < 0)
conv_image_final[i][j] = 0;
else
conv_image_final[i][j] = pixel;
}
}
// Write convoluted image to file
stbi_write_jpg(argv[2], cols, rows, 1, conv_image_final, cols);
return 0;
}
|
2c26623d48f40a9672d8da3297c0e11a72ebb2fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Author: Zachariah Bryant
 * Description: This is a class wrapper for running SU(2) lattice QCD
* operations using CUDA.
*/
// ********************
// * Headers *
// ********************
#include "./Headers/LattiCuda.cuh"
#include "./Headers/LattiCuda_Device.cuh"
#include "./Headers/Complex.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
using namespace std;
// *****************************************
// * Global Variable Declarations *
// *****************************************
/**
* Constant Variables for device code
* @var d_size - Size of the lattice
* @var d_beta - Value of Beta
*/
__constant__ int d_size;
__constant__ double d_beta;
// *****************************
// *     Kernel Functions       *
// *****************************
/**
* Initializes the lattice to unit matrices.
* @param d_lattice - Array to lattice in device memory.
* @param tdim - Time dimension to initialize.
*/
__global__ void
gpu_Initialize(bach::complex<double> *d_lattice, int tdim){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.initialize();
};
/**
* Equilibrates the lattice.
* @param d_lattice - Pointer to lattice in device memory.
* @param tdim - Time dimension to equilibrate.
* @param dir - Direction to equilibrate.
*/
__global__ void
gpu_Equilibrate(bach::complex<double> *d_lattice, int tdim, int dir){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.equilibrate(dir);
};
/**
* Gets the sum of plaquettes for the lattice.
* @param d_lattice - Pointer to lattice in device memory.
* @param tdim - Time slice to look in.
* @param d_plaq - Array to hold sum of plaquettes unique
* for eaach lattice point.
* @param d_iter - Amount of plaquettes counted unique for each lattice point.
*/
__global__ void
gpu_AvgPlaquette(bach::complex<double> *d_lattice, int tdim, double *d_plaq, double *d_iter){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.avgPlaquette(d_plaq, d_iter);
};
/**
 * Generates the sum of traces of two Polyakov loops
 * @param d_lattice - Pointer to lattice in device memory.
 * @param d_poly - Array holding the sum of the trace of two Polyakov loops.
 * @param d_iter - Amount of traces calculated.
 * @param dist - Distance of separation of the Polyakov loops.
*/
__global__ void
gpu_Polykov(bach::complex<double> *d_lattice, double *d_poly, double *d_iter, int dist){
//Create a gpu object with time slice set to zero
LattiCuda_Device device(&d_size, &d_beta, d_lattice, 0);
device.polykov(d_poly, d_iter, dist);
};
// *************************************
// * Private Member Functions *
// *************************************
/**
 * Initializes all lattice links to the unit matrix by invoking the kernel.
*/
__host__ void
LattiCuda::initialize(){
int half = h_size/4;
dim3 in_Threads(4, 4, 4);
dim3 in_Blocks(half, half, half);
for(int t = 0; t < h_size; t++) {
hipLaunchKernelGGL(( gpu_Initialize), dim3(in_Blocks),dim3(in_Threads), 0, 0, d_lattice, t);
}
};
/**
* Returns 1D array location for linearized 4D lattice
* @param dim - Array with lattice dimension location t,x,y,z
* @param d - Direction to look in
* @param m - Matrix element for link
* @return - Int for array location
*/
__host__ int
LattiCuda::loc(int *dim, int d, int m){
int coor{0};
coor = dim[1] + dim[2]*(h_size) + dim[3]*(h_size)*(h_size) + dim[0]*(h_size)*(h_size)*(h_size)
+ d*(h_size)*(h_size)*(h_size)*(h_size) + m*(h_size)*(h_size)*(h_size)*(h_size)*(4);
return coor;
};
// ************************************
// * Public Member Functions *
// ************************************
/**
* Constructor for the Lattice QCD wrapper
* @param LattSize - Size of desired lattice
* @param inBeta - Beta value
*/
__host__
LattiCuda::LattiCuda(int LattSize, double inBeta){
//Construct Host Variables
h_size = LattSize;
h_beta = inBeta;
memsize = h_size*h_size*h_size*h_size*4*4;
//Create Host Lattice
h_lattice = new bach::complex<double>[memsize];
//Create Device Lattice
hipMalloc((void**)&d_lattice, memsize*sizeof(bach::complex<double>));
//Construct Constant Device Variables
hipMemcpyToSymbol(d_size, &h_size, sizeof(int));
hipMemcpyToSymbol(d_beta, &h_beta, sizeof(double));
//Initialize the lattice on creation
initialize();
};
/**
* Destructor for the Lattice QCD wrapper
*/
__host__
LattiCuda::~LattiCuda(){
delete[] h_lattice;
hipFree(d_lattice);
};
/**
 * Equilibrates the lattice by invoking the GPU kernel.
*/
__host__ void
LattiCuda::equilibrate(){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
//All directions need to updated independently
for(int d = 0; d < 4; d++) {
//Checkerboard pattern for T dimension
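                //(even and odd time slices are swept in separate passes so that
                // neighbouring links are never updated at the same time)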
for(int offset = 0; offset <= 1; offset++) {
for(int tdim = 0; tdim < h_size/2; tdim++) {
hipLaunchKernelGGL(( gpu_Equilibrate), dim3(Blocks), dim3(Threads), 0, 0, d_lattice, ((tdim)*2 + offset), d);
}
hipDeviceSynchronize();
}
}
};
/**
* Generates the value of the average plaquette for the lattice.
* @return double
*/
__host__ double
LattiCuda::avgPlaquette(){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
        //Array to hold total avg plaquette per thread and total amount of iterations
double *h_plaq;
double *h_iter;
h_plaq = new double[h_size*h_size*h_size*h_size];
h_iter = new double[h_size*h_size*h_size*h_size];
double *d_plaq;
double *d_iter;
hipMalloc((void**)&d_plaq, sizeof(double)*h_size*h_size*h_size*h_size);
hipMalloc((void**)&d_iter, sizeof(double)*h_size*h_size*h_size*h_size);
//Run on gpu for each time slice
for(int tdim = 0; tdim < h_size; tdim++) {
hipLaunchKernelGGL(( gpu_AvgPlaquette), dim3(Blocks), dim3(Threads), 0, 0,
d_lattice, tdim, d_plaq, d_iter);
}
hipDeviceSynchronize();
//Copy results from gpu
hipMemcpy(h_plaq, d_plaq, sizeof(double)*h_size*h_size*h_size*h_size, hipMemcpyDeviceToHost);
hipMemcpy(h_iter, d_iter, sizeof(double)*h_size*h_size*h_size*h_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//Evaluate results
double totplaq{0};
double totiter{0};
for(int i = 0; i < h_size*h_size*h_size*h_size; i++) {
//cout << i << " "<< h_plaq[i] << "\n";
totplaq += h_plaq[i];
totiter += h_iter[i];
}
hipFree(d_plaq);
hipFree(d_iter);
delete[] h_plaq;
delete[] h_iter;
return (1 - totplaq/totiter);
};
/**
 * Calculates the expectation value of the trace of two Polyakov loops
* @param dist - Distance of loops
* @return double
*/
__host__ double
LattiCuda::polykov(int dist){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
        //Arrays to hold the summed Polyakov traces per thread and the total amount of traces counted
double *h_poly;
double *h_iter;
h_poly = new double[h_size*h_size*h_size];
h_iter = new double[h_size*h_size*h_size];
double *d_poly;
double *d_iter;
//Allocate GPU memory
hipMalloc((void**)&d_poly, sizeof(double)*h_size*h_size*h_size);
hipMalloc((void**)&d_iter, sizeof(double)*h_size*h_size*h_size);
//Run on GPU (Only 0 time slice)
hipLaunchKernelGGL(( gpu_Polykov), dim3(Blocks), dim3(Threads), 0, 0, d_lattice, d_poly, d_iter, dist);
hipDeviceSynchronize();
//Copy results from gpu
hipMemcpy(h_poly, d_poly, sizeof(double)*h_size*h_size*h_size, hipMemcpyDeviceToHost);
hipMemcpy(h_iter, d_iter, sizeof(double)*h_size*h_size*h_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//Evaluate results
double totpoly{0};
double totiter{0};
for(int i = 0; i < h_size*h_size*h_size; i++) {
//cout << i << " "<< h_plaq[i] << "\n";
totpoly += h_poly[i];
totiter += h_iter[i];
}
hipFree(d_poly);
hipFree(d_iter);
delete[] h_poly;
delete[] h_iter;
return totpoly/totiter;
};
/**
* Saves the lattice configuration to a file.
*/
__host__ void
LattiCuda::save(){
printf("Saving Lattice Configuration......\n");
//Copy device lattice to host lattice
hipMemcpy(h_lattice, d_lattice, memsize*sizeof(bach::complex<double>), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//File to write to
fstream File1;
File1.open("../Data/LatticeConfig.dat", ios::out | ios::trunc);
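        //Only the first row of each SU(2) link (two complex numbers) is written per line;
        //load() reconstructs the second row from the SU(2) structure.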
int pos[4] = {0,0,0,0};
for( pos[0] = 0; pos[0] < h_size; pos[0]++) { // T dimension
for( pos[1] = 0; pos[1] < h_size; pos[1]++) { // X dimension
for( pos[2] = 0; pos[2] < h_size; pos[2]++) { // Y dimension
for( pos[3] = 0; pos[3] < h_size; pos[3]++) { // Z dimension
for(int d = 0; d < 4; d++) { // direction
File1 << h_lattice[loc(pos, d, 0)].real() << " " << h_lattice[loc(pos, d, 0)].imag()
<< " " << h_lattice[loc(pos, d, 1)].real() << " " << h_lattice[loc(pos, d, 1)].imag() << endl;
}
}
}
}
}
File1.close();
printf("Done Saving.\n");
};
/**
* Loads a lattice configuration from a file
*/
__host__ void
LattiCuda::load(){
printf("Loading Lattice Configuration.......\n");
fstream File;
File.open("../Data/LatticeConfig.dat", ios::in );
double real, imag;
bool test = File.is_open();
if(test) {
int pos[4] = {0,0,0,0};
for( pos[0] = 0; pos[0] < h_size; pos[0]++) { // T dimension
for( pos[1] = 0; pos[1] < h_size; pos[1]++) { // X dimension
for( pos[2] = 0; pos[2] < h_size; pos[2]++) { // Y dimension
for( pos[3] = 0; pos[3] < h_size; pos[3]++) { // Z dimension
for(int d = 0; d < 4; d++) { // direction
File >> real;
File >> imag;
h_lattice[loc(pos, d, 0)] = bach::complex<double>(real, imag);
h_lattice[loc(pos, d, 3)] = bach::complex<double>(real, (-1)*imag);
File >> real;
File >> imag;
h_lattice[loc(pos, d, 1)] = bach::complex<double>(real, imag);
h_lattice[loc(pos, d, 2)] = bach::complex<double>((-1)*real, imag);
}
}
}
}
}
}
File.close();
//Copy host lattice to device lattice
hipMemcpy(d_lattice, h_lattice, memsize*sizeof(bach::complex<double>), hipMemcpyHostToDevice);
hipDeviceSynchronize();
printf("Done Loading.\n");
};
| 2c26623d48f40a9672d8da3297c0e11a72ebb2fb.cu | /**
* Author: Zachariah Bryant
 * Description: This is a class wrapper for running SU(2) lattice QCD
* operations using CUDA.
*/
// ********************
// * Headers *
// ********************
#include "./Headers/LattiCuda.cuh"
#include "./Headers/LattiCuda_Device.cuh"
#include "./Headers/Complex.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
using namespace std;
// *****************************************
// * Global Variable Declarations *
// *****************************************
/**
* Constant Variables for device code
* @var d_size - Size of the lattice
* @var d_beta - Value of Beta
*/
__constant__ int d_size;
__constant__ double d_beta;
// *****************************
// *     Kernel Functions       *
// *****************************
/**
* Initializes the lattice to unit matrices.
* @param d_lattice - Array to lattice in device memory.
* @param tdim - Time dimension to initialize.
*/
__global__ void
gpu_Initialize(bach::complex<double> *d_lattice, int tdim){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.initialize();
};
/**
* Equilibrates the lattice.
* @param d_lattice - Pointer to lattice in device memory.
* @param tdim - Time dimension to equilibrate.
* @param dir - Direction to equilibrate.
*/
__global__ void
gpu_Equilibrate(bach::complex<double> *d_lattice, int tdim, int dir){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.equilibrate(dir);
};
/**
* Gets the sum of plaquettes for the lattice.
* @param d_lattice - Pointer to lattice in device memory.
* @param tdim - Time slice to look in.
* @param d_plaq - Array to hold sum of plaquettes unique
 *                 for each lattice point.
* @param d_iter - Amount of plaquettes counted unique for each lattice point.
*/
__global__ void
gpu_AvgPlaquette(bach::complex<double> *d_lattice, int tdim, double *d_plaq, double *d_iter){
LattiCuda_Device device(&d_size, &d_beta, d_lattice, tdim);
device.avgPlaquette(d_plaq, d_iter);
};
/**
 * Generates the sum of traces of two Polyakov loops
 * @param d_lattice - Pointer to lattice in device memory.
 * @param d_poly - Array holding the sum of the trace of two Polyakov loops.
 * @param d_iter - Amount of traces calculated.
 * @param dist - Distance of separation of the Polyakov loops.
*/
__global__ void
gpu_Polykov(bach::complex<double> *d_lattice, double *d_poly, double *d_iter, int dist){
//Create a gpu object with time slice set to zero
LattiCuda_Device device(&d_size, &d_beta, d_lattice, 0);
device.polykov(d_poly, d_iter, dist);
};
// *************************************
// * Private Member Functions *
// *************************************
/**
 * Initializes all lattice links to the unit matrix by invoking the kernel.
*/
__host__ void
LattiCuda::initialize(){
int half = h_size/4;
dim3 in_Threads(4, 4, 4);
dim3 in_Blocks(half, half, half);
for(int t = 0; t < h_size; t++) {
gpu_Initialize<<<in_Blocks,in_Threads>>>(d_lattice, t);
}
};
/**
* Returns 1D array location for linearized 4D lattice
* @param dim - Array with lattice dimension location t,x,y,z
* @param d - Direction to look in
* @param m - Matrix element for link
* @return - Int for array location
*/
__host__ int
LattiCuda::loc(int *dim, int d, int m){
int coor{0};
coor = dim[1] + dim[2]*(h_size) + dim[3]*(h_size)*(h_size) + dim[0]*(h_size)*(h_size)*(h_size)
+ d*(h_size)*(h_size)*(h_size)*(h_size) + m*(h_size)*(h_size)*(h_size)*(h_size)*(4);
return coor;
};
// ************************************
// * Public Member Functions *
// ************************************
/**
* Constructor for the Lattice QCD wrapper
* @param LattSize - Size of desired lattice
* @param inBeta - Beta value
*/
__host__
LattiCuda::LattiCuda(int LattSize, double inBeta){
//Construct Host Variables
h_size = LattSize;
h_beta = inBeta;
memsize = h_size*h_size*h_size*h_size*4*4;
//Create Host Lattice
h_lattice = new bach::complex<double>[memsize];
//Create Device Lattice
cudaMalloc((void**)&d_lattice, memsize*sizeof(bach::complex<double>));
//Construct Constant Device Variables
cudaMemcpyToSymbol(d_size, &h_size, sizeof(int));
cudaMemcpyToSymbol(d_beta, &h_beta, sizeof(double));
//Initialize the lattice on creation
initialize();
};
/**
* Destructor for the Lattice QCD wrapper
*/
__host__
LattiCuda::~LattiCuda(){
delete[] h_lattice;
cudaFree(d_lattice);
};
/**
 * Equilibrates the lattice by invoking the GPU kernel.
*/
__host__ void
LattiCuda::equilibrate(){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
//All directions need to updated independently
for(int d = 0; d < 4; d++) {
//Checkerboard pattern for T dimension
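                //(even and odd time slices are swept in separate passes so that
                // neighbouring links are never updated at the same time)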
for(int offset = 0; offset <= 1; offset++) {
for(int tdim = 0; tdim < h_size/2; tdim++) {
gpu_Equilibrate<<<Blocks, Threads>>>(d_lattice, ((tdim)*2 + offset), d);
}
cudaDeviceSynchronize();
}
}
};
/**
* Generates the value of the average plaquette for the lattice.
* @return double
*/
__host__ double
LattiCuda::avgPlaquette(){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
        //Array to hold total avg plaquette per thread and total amount of iterations
double *h_plaq;
double *h_iter;
h_plaq = new double[h_size*h_size*h_size*h_size];
h_iter = new double[h_size*h_size*h_size*h_size];
double *d_plaq;
double *d_iter;
cudaMalloc((void**)&d_plaq, sizeof(double)*h_size*h_size*h_size*h_size);
cudaMalloc((void**)&d_iter, sizeof(double)*h_size*h_size*h_size*h_size);
//Run on gpu for each time slice
for(int tdim = 0; tdim < h_size; tdim++) {
gpu_AvgPlaquette<<<Blocks, Threads>>>
(d_lattice, tdim, d_plaq, d_iter);
}
cudaDeviceSynchronize();
//Copy results from gpu
cudaMemcpy(h_plaq, d_plaq, sizeof(double)*h_size*h_size*h_size*h_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_iter, d_iter, sizeof(double)*h_size*h_size*h_size*h_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//Evaluate results
double totplaq{0};
double totiter{0};
for(int i = 0; i < h_size*h_size*h_size*h_size; i++) {
//cout << i << " "<< h_plaq[i] << "\n";
totplaq += h_plaq[i];
totiter += h_iter[i];
}
cudaFree(d_plaq);
cudaFree(d_iter);
delete[] h_plaq;
delete[] h_iter;
return (1 - totplaq/totiter);
};
/**
 * Calculates the expectation value of the trace of two Polyakov loops
* @param dist - Distance of loops
* @return double
*/
__host__ double
LattiCuda::polykov(int dist){
int split = h_size/4;
        //Dimensions for the kernel
dim3 Threads(4, 4, 4);
dim3 Blocks(split, split, split);
        //Arrays to hold the summed Polyakov traces per thread and the total amount of traces counted
double *h_poly;
double *h_iter;
h_poly = new double[h_size*h_size*h_size];
h_iter = new double[h_size*h_size*h_size];
double *d_poly;
double *d_iter;
//Allocate GPU memory
cudaMalloc((void**)&d_poly, sizeof(double)*h_size*h_size*h_size);
cudaMalloc((void**)&d_iter, sizeof(double)*h_size*h_size*h_size);
//Run on GPU (Only 0 time slice)
gpu_Polykov<<<Blocks, Threads>>>(d_lattice, d_poly, d_iter, dist);
cudaDeviceSynchronize();
//Copy results from gpu
cudaMemcpy(h_poly, d_poly, sizeof(double)*h_size*h_size*h_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_iter, d_iter, sizeof(double)*h_size*h_size*h_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//Evaluate results
double totpoly{0};
double totiter{0};
for(int i = 0; i < h_size*h_size*h_size; i++) {
//cout << i << " "<< h_plaq[i] << "\n";
totpoly += h_poly[i];
totiter += h_iter[i];
}
cudaFree(d_poly);
cudaFree(d_iter);
delete[] h_poly;
delete[] h_iter;
return totpoly/totiter;
};
/**
* Saves the lattice configuration to a file.
*/
__host__ void
LattiCuda::save(){
printf("Saving Lattice Configuration......\n");
//Copy device lattice to host lattice
cudaMemcpy(h_lattice, d_lattice, memsize*sizeof(bach::complex<double>), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//File to write to
fstream File1;
File1.open("../Data/LatticeConfig.dat", ios::out | ios::trunc);
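        //Only the first row of each SU(2) link (two complex numbers) is written per line;
        //load() reconstructs the second row from the SU(2) structure.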
int pos[4] = {0,0,0,0};
for( pos[0] = 0; pos[0] < h_size; pos[0]++) { // T dimension
for( pos[1] = 0; pos[1] < h_size; pos[1]++) { // X dimension
for( pos[2] = 0; pos[2] < h_size; pos[2]++) { // Y dimension
for( pos[3] = 0; pos[3] < h_size; pos[3]++) { // Z dimension
for(int d = 0; d < 4; d++) { // direction
File1 << h_lattice[loc(pos, d, 0)].real() << " " << h_lattice[loc(pos, d, 0)].imag()
<< " " << h_lattice[loc(pos, d, 1)].real() << " " << h_lattice[loc(pos, d, 1)].imag() << endl;
}
}
}
}
}
File1.close();
printf("Done Saving.\n");
};
/**
* Loads a lattice configuration from a file
*/
__host__ void
LattiCuda::load(){
printf("Loading Lattice Configuration.......\n");
fstream File;
File.open("../Data/LatticeConfig.dat", ios::in );
double real, imag;
bool test = File.is_open();
if(test) {
int pos[4] = {0,0,0,0};
for( pos[0] = 0; pos[0] < h_size; pos[0]++) { // T dimension
for( pos[1] = 0; pos[1] < h_size; pos[1]++) { // X dimension
for( pos[2] = 0; pos[2] < h_size; pos[2]++) { // Y dimension
for( pos[3] = 0; pos[3] < h_size; pos[3]++) { // Z dimension
for(int d = 0; d < 4; d++) { // direction
File >> real;
File >> imag;
h_lattice[loc(pos, d, 0)] = bach::complex<double>(real, imag);
h_lattice[loc(pos, d, 3)] = bach::complex<double>(real, (-1)*imag);
File >> real;
File >> imag;
h_lattice[loc(pos, d, 1)] = bach::complex<double>(real, imag);
h_lattice[loc(pos, d, 2)] = bach::complex<double>((-1)*real, imag);
}
}
}
}
}
}
File.close();
//Copy host lattice to device lattice
cudaMemcpy(d_lattice, h_lattice, memsize*sizeof(bach::complex<double>), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
printf("Done Loading.\n");
};
|
bac7e095a58122ae49e8bbbf7041b39e47a8c9cb.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <iostream>
#include <chrono>
#include <fstream>
using namespace std::chrono;
static std::ofstream output;
// static ofstream file_1D2D = ofstream("file_1D2D.csv", std::iostream::out);
// static ofstream file_2D2D = ofstream("file_2D2D.csv", std::iostream::out);
// static ofstream file_CPU = ofstream("file_CPU.csv", std::iostream::out);
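// MSBsqrt: returns floor(sqrt(n)) when it evenly divides n*n, otherwise rounds it up
// to the next power of two; used below to pick a square-ish 2D grid decomposition.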
int MSBsqrt(const int n)
{
int sn = sqrt(n);
if( 0 != n*n%sn )
{
int counter = 1;
int psn = sn;
while(psn)
{
psn = (psn >> 1);
counter++;
}
counter--;
sn = 1;
sn = (sn << counter);
}
return sn;
}
template<class T>
__global__ void printThreadIndex2D(const T* MatA, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy*nx + ix;
printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %2f\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatA[idx]);
}
template<class T>
__device__ void printThreadIndex1D(const T* MatC, const int ny, const int ntotal)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
int ix = idx%ny;
int iy = idx/ny;
if(idx < ntotal)
{
printf("thread_id (%d) block_id (%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
}
}
template<class T>
__global__ void matrixAdd_1D1D(const float* MatA, const float* MatB, float* MatC, const int ny, const int ntotal)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
// int ix = idx%ny;
// int iy = idx/ny;
if(idx < ntotal)
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("grid_dim (%d, %d, %d), block_dim (%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z,blockDim.x, blockDim.y, blockDim.z);
printf("block_dim (%d) thread_id (%d) block_id (%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", blockDim.x, threadIdx.x, blockIdx.x, ix, iy, idx, MatC[idx]);
#endif
}
}
template<class T>
__global__ void matrixAdd_2D2D(const T* MatA, const T* MatB, T* MatC, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy*ny + ix;
if(ix < nx && iy < ny)
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("matC=%.2f, matA=%.2f, matB=%.2f\n", MatC[idx], MatA[idx], MatB[idx]);
printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
#endif
}
}
template<class T>
__global__ void matrixAdd_2D1D(const T* MatA, const T* MatB, T* MatC, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y*gridDim.x*blockDim.x;
unsigned int idx = iy + ix;
if( idx < nx*ny )
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("matC=%.2f, matA=%.2f, matB=%.2f\n", MatC[idx], MatA[idx], MatB[idx]);
printf("thread_id (%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", threadIdx.x, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
#endif
}
}
float* allocateFloatMatrix(const int nx, const int ny)
{
float* handle = (float*)malloc(nx * ny * sizeof(float));
return handle;
}
template<class T>
void fillMatrixRandom(T* matrix, const int nx, const int ny)
{
for(int j = 0; j < nx; j++)
{
for(int i = 0; i < ny; i++)
{
matrix[j * nx + i] = rand()/(float)RAND_MAX;
}
}
}
template<>
void fillMatrixRandom<int>(int* matrix, const int nx, const int ny)
{
for(int j = 0; j < nx; j++)
{
for(int i = 0; i < ny; i++)
{
matrix[j * nx + i] = rand()%100;
}
}
}
void exec_1D1D(const int nx, const int ny)
{
hipError_t err = hipSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( hipSuccess != (err = hipMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( hipSuccess != (err = hipMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)) ||
hipSuccess != (err = hipMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)))
{
fprintf(stderr, "Failed to copy matrix from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int blocksPerGrid = 1;
int threadsPerBlock = nx * ny;
dim3 threadsInBlock(threadsPerBlock);
printf("%d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( matrixAdd_1D1D<float>), dim3(blocksPerGrid), dim3(threadsInBlock), 0, 0, dev_matrixA, dev_matrixB, dev_matrixC, ny, nx*ny);
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
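    // Note: the launch above is asynchronous, so this interval mostly measures launch
    // overhead; a hipDeviceSynchronize() before `stop` would be needed to time the kernel itself.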
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_1D2D(const int nx, const int ny)
{
hipError_t err = hipSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( hipSuccess != (err = hipMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( hipSuccess != (err = hipMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)) ||
hipSuccess != (err = hipMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)))
{
fprintf(stderr, "Failed to copy matrix from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int snx = MSBsqrt(nx);
int sny = MSBsqrt(ny);
dim3 blocksPerGrid(snx, sny);
//int threadsPerBlock = nx * ny;
dim3 threadsInBlock((nx*ny)/(snx*sny));
printf("(%d,%d) blocks of (%d) threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsInBlock.x);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( matrixAdd_2D1D<float>), dim3(blocksPerGrid), dim3(threadsInBlock), 0, 0, dev_matrixA, dev_matrixB, dev_matrixC, nx, ny);
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_2D2D(const int nx, const int ny)
{
hipError_t err = hipSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( hipSuccess != (err = hipMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
hipSuccess != (err = hipMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( hipSuccess != (err = hipMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)) ||
hipSuccess != (err = hipMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, hipMemcpyHostToDevice)))
{
fprintf(stderr, "%s: Failed to copy matrix from host to device (error code %s)!\n", __func__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int snx = MSBsqrt(nx);
int sny = MSBsqrt(ny);
dim3 blocksPerGrid(snx, sny);
//int threadsPerBlock = nx * ny;
dim3 threadsInBlock((nx/snx), (ny/sny));
printf("(%d,%d) blocks of (%d,%d) threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsInBlock.x, threadsInBlock.y);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( matrixAdd_2D2D<float>), dim3(blocksPerGrid), dim3(threadsInBlock), 0, 0, dev_matrixA, dev_matrixB, dev_matrixC, ny, ny);
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_CPU(const int nx, const int ny)
{
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
auto start = high_resolution_clock::now();
for(int x = 0; x < nx; x++)
{
for(int y = 0; y < ny; y++)
{
host_matrixC[x+y*ny] = host_matrixA[x+y*ny] + host_matrixB[x+y*ny];
}
}
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
}
int main(void)
{
int filteredNum = 0;
output.open("output.csv");
// twelve iterations
for(int numElements = 0x2; 0x0 == (filteredNum = numElements & 0xFFFFF800) ; numElements = (numElements << 1))
{
const int nx = numElements;
const int ny = numElements;
output << numElements << ",";
printf("Case elements: %d\n", numElements*numElements);
		if(numElements <= 32) //that will make 1024 threads in a single block, which is the maximum
exec_1D1D(ny, nx);
output << ",";
exec_1D2D(ny, nx);
output << ",";
exec_2D2D(ny, nx);
output << ",";
exec_CPU(ny, nx);
output << std::endl;
}
output.close();
// file_1D2D.close();
// file_2D2D.close();
// file_CPU.close();
return 0;
}
| bac7e095a58122ae49e8bbbf7041b39e47a8c9cb.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <iostream>
#include <chrono>
#include <fstream>
using namespace std::chrono;
static std::ofstream output;
// static ofstream file_1D2D = ofstream("file_1D2D.csv", std::iostream::out);
// static ofstream file_2D2D = ofstream("file_2D2D.csv", std::iostream::out);
// static ofstream file_CPU = ofstream("file_CPU.csv", std::iostream::out);
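// MSBsqrt: returns floor(sqrt(n)) when it evenly divides n*n, otherwise rounds it up
// to the next power of two; used below to pick a square-ish 2D grid decomposition.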
int MSBsqrt(const int n)
{
int sn = sqrt(n);
if( 0 != n*n%sn )
{
int counter = 1;
int psn = sn;
while(psn)
{
psn = (psn >> 1);
counter++;
}
counter--;
sn = 1;
sn = (sn << counter);
}
return sn;
}
template<class T>
__global__ void printThreadIndex2D(const T* MatA, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy*nx + ix;
printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %2f\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatA[idx]);
}
template<class T>
__device__ void printThreadIndex1D(const T* MatC, const int ny, const int ntotal)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
int ix = idx%ny;
int iy = idx/ny;
if(idx < ntotal)
{
printf("thread_id (%d) block_id (%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
}
}
template<class T>
__global__ void matrixAdd_1D1D(const float* MatA, const float* MatB, float* MatC, const int ny, const int ntotal)
{
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
// int ix = idx%ny;
// int iy = idx/ny;
if(idx < ntotal)
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("grid_dim (%d, %d, %d), block_dim (%d, %d, %d)\n", gridDim.x, gridDim.y, gridDim.z,blockDim.x, blockDim.y, blockDim.z);
printf("block_dim (%d) thread_id (%d) block_id (%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", blockDim.x, threadIdx.x, blockIdx.x, ix, iy, idx, MatC[idx]);
#endif
}
}
template<class T>
__global__ void matrixAdd_2D2D(const T* MatA, const T* MatB, T* MatC, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int idx = iy*ny + ix;
if(ix < nx && iy < ny)
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("matC=%.2f, matA=%.2f, matB=%.2f\n", MatC[idx], MatA[idx], MatB[idx]);
printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
#endif
}
}
template<class T>
__global__ void matrixAdd_2D1D(const T* MatA, const T* MatB, T* MatC, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = blockIdx.y*gridDim.x*blockDim.x;
unsigned int idx = iy + ix;
if( idx < nx*ny )
{
MatC[idx] = MatA[idx] + MatB[idx];
#if 0
printf("matC=%.2f, matA=%.2f, matB=%.2f\n", MatC[idx], MatA[idx], MatB[idx]);
printf("thread_id (%d) block_id (%d,%d) coordinate (%d,%d)"
"global index %2d ival %.2f\n", threadIdx.x, blockIdx.x, blockIdx.y, ix, iy, idx, MatC[idx]);
#endif
}
}
float* allocateFloatMatrix(const int nx, const int ny)
{
float* handle = (float*)malloc(nx * ny * sizeof(float));
return handle;
}
template<class T>
void fillMatrixRandom(T* matrix, const int nx, const int ny)
{
for(int j = 0; j < nx; j++)
{
for(int i = 0; i < ny; i++)
{
matrix[j * nx + i] = rand()/(float)RAND_MAX;
}
}
}
template<>
void fillMatrixRandom<int>(int* matrix, const int nx, const int ny)
{
for(int j = 0; j < nx; j++)
{
for(int i = 0; i < ny; i++)
{
matrix[j * nx + i] = rand()%100;
}
}
}
void exec_1D1D(const int nx, const int ny)
{
cudaError_t err = cudaSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( cudaSuccess != (err = cudaMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( cudaSuccess != (err = cudaMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)) ||
cudaSuccess != (err = cudaMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)))
{
fprintf(stderr, "Failed to copy matrix from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int blocksPerGrid = 1;
int threadsPerBlock = nx * ny;
dim3 threadsInBlock(threadsPerBlock);
printf("%d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
auto start = high_resolution_clock::now();
matrixAdd_1D1D<float><<<blocksPerGrid, threadsInBlock>>>(dev_matrixA, dev_matrixB, dev_matrixC, ny, nx*ny);
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
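    // Note: the launch above is asynchronous, so this interval mostly measures launch
    // overhead; a cudaDeviceSynchronize() before `stop` would be needed to time the kernel itself.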
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_1D2D(const int nx, const int ny)
{
cudaError_t err = cudaSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( cudaSuccess != (err = cudaMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( cudaSuccess != (err = cudaMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)) ||
cudaSuccess != (err = cudaMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)))
{
fprintf(stderr, "Failed to copy matrix from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int snx = MSBsqrt(nx);
int sny = MSBsqrt(ny);
dim3 blocksPerGrid(snx, sny);
//int threadsPerBlock = nx * ny;
dim3 threadsInBlock((nx*ny)/(snx*sny));
printf("(%d,%d) blocks of (%d) threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsInBlock.x);
auto start = high_resolution_clock::now();
matrixAdd_2D1D<float><<<blocksPerGrid, threadsInBlock>>>(dev_matrixA, dev_matrixB, dev_matrixC, nx, ny);
cudaDeviceSynchronize(); // wait for the kernel to finish so the measured time covers execution, not just the launch
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free device memory
cudaFree(dev_matrixA);
cudaFree(dev_matrixB);
cudaFree(dev_matrixC);
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_2D2D(const int nx, const int ny)
{
cudaError_t err = cudaSuccess;
size_t sizeOfAllocationOnGraphicsCard = nx*ny*sizeof(float);
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//device allocation
float* dev_matrixA, *dev_matrixB, *dev_matrixC;
if( cudaSuccess != (err = cudaMalloc((void**)&dev_matrixA, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixB, sizeOfAllocationOnGraphicsCard)) ||
cudaSuccess != (err = cudaMalloc((void**)&dev_matrixC, sizeOfAllocationOnGraphicsCard)))
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
// Error code to check return values for CUDA calls
//copying data on graphics card
if( cudaSuccess != (err = cudaMemcpy(dev_matrixA, host_matrixA, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)) ||
cudaSuccess != (err = cudaMemcpy(dev_matrixB, host_matrixB, sizeOfAllocationOnGraphicsCard, cudaMemcpyHostToDevice)))
{
fprintf(stderr, "%s: Failed to copy matrix from host to device (error code %s)!\n", __func__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the matrixAdd CUDA Kernel
int snx = MSBsqrt(nx);
int sny = MSBsqrt(ny);
dim3 blocksPerGrid(snx, sny);
//int threadsPerBlock = nx * ny;
dim3 threadsInBlock((nx/snx), (ny/sny));
printf("(%d,%d) blocks of (%d,%d) threads\n", blocksPerGrid.x, blocksPerGrid.y, threadsInBlock.x, threadsInBlock.y);
auto start = high_resolution_clock::now();
matrixAdd_2D2D<float><<<blocksPerGrid, threadsInBlock>>>(dev_matrixA, dev_matrixB, dev_matrixC, nx, ny);
cudaDeviceSynchronize(); // wait for the kernel to finish so the measured time covers execution, not just the launch
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "%s: Failed to launch matrixAdd kernel (error code %s)!\n", __func__, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(host_matrixC, dev_matrixC, sizeOfAllocationOnGraphicsCard, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy matrix C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < nx * ny; ++i)
{
if (fabs(host_matrixA[i] + host_matrixB[i] - host_matrixC[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
// Free device memory
cudaFree(dev_matrixA);
cudaFree(dev_matrixB);
cudaFree(dev_matrixC);
// Free host memory
free(host_matrixA);
free(host_matrixB);
free(host_matrixC);
printf("Done\n");
}
void exec_CPU(const int nx, const int ny)
{
float* host_matrixA = allocateFloatMatrix(nx, ny);
float* host_matrixB = allocateFloatMatrix(nx, ny);
float* host_matrixC = allocateFloatMatrix(nx, ny);
if (host_matrixA == NULL || host_matrixB == NULL || host_matrixC == NULL)
{
fprintf(stderr, "Failed to allocate host matrices!\n");
exit(EXIT_FAILURE);
}
//filling matrices with random values
printf("Fill matrices\n");
fillMatrixRandom(host_matrixA, nx, ny);
fillMatrixRandom(host_matrixB, nx, ny);
fillMatrixRandom(host_matrixC, nx, ny);
auto start = high_resolution_clock::now();
for(int x = 0; x < nx; x++)
{
for(int y = 0; y < ny; y++)
{
host_matrixC[x+y*ny] = host_matrixA[x+y*ny] + host_matrixB[x+y*ny];
}
}
auto stop = high_resolution_clock::now();
auto durationOnCUDA = duration_cast<microseconds>(stop - start);
output << durationOnCUDA.count();
}
int main(void)
{
int filteredNum = 0;
output.open("output.csv");
// ten iterations: numElements doubles from 2 up to 1024 (the 0xFFFFF800 mask ends the loop once numElements reaches 2048)
for(int numElements = 0x2; 0x0 == (filteredNum = numElements & 0xFFFFF800) ; numElements = (numElements << 1))
{
const int nx = numElements;
const int ny = numElements;
output << numElements << ",";
printf("Case elements: %d\n", numElements*numElements);
if(numElements <= 32) //that will make 1024 threads in a single block, which is the maximum
exec_1D1D(ny, nx);
output << ",";
exec_1D2D(ny, nx);
output << ",";
exec_2D2D(ny, nx);
output << ",";
exec_CPU(ny, nx);
output << std::endl;
}
output.close();
// file_1D2D.close();
// file_2D2D.close();
// file_CPU.close();
return 0;
}
|
1d00f47a38297df7dcd53570db4ed526c961968b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
extern __shared__ double cache[];
__global__ void kernel(int *Ss, int *Nn, int *mask, double *xyz,
double *cost){
int i1 = blockIdx.x;
int i2 = threadIdx.x;
int j = threadIdx.y;
int dimx= blockDim.x, dimy = blockDim.y;
int cacheIndex = j*dimx+i2, cIndexMax = dimx*dimy;
cache[cacheIndex] = 0.5;
__syncthreads();
//Sum all the cache indices here (done by a single thread to avoid a data race on cache[0] and cost[i1])
//separate offset
int i;
if (cacheIndex == 0) {
  for(i=1; i<cIndexMax; i++)
    cache[0]+=cache[i];
  cost[i1] += cache[0];
}
}
int main()
{
//Initializing
int N = 3, S = 10 ;
double xyz[3][N][S], linxyz[3*N*S],soma;
double cost[S];
int mask[3][N]={0}, linmask[3*N];
long int i1,i2;
long int j=0,k=0;
//mask
for(k=0; k<3; k++){
for(j=0; j<N; j++){
mask[k][j] = 1;
if(j%(k+1)==0)
mask[k][j] = 1;
}
}
for(k=0; k<3; k++){
for(j=0; j<N; j++){
linmask[j+N*k] = mask[k][j];
}
}
//mask
for(k=0; k<3; k++){
for(j=0; j<N;j++){
for(i1=0; i1<S; i1++){
xyz[k][j][i1] = 0.1*(i1+S*j + S*N*k);
linxyz[i1+S*j + S*N*k] = xyz[k][j][i1];
}
}
}
//CPU
soma = 0;
for (i1 = 0 ; i1 < S ; ++i1) {
for (i2 = 0; i2< S ; ++i2) {
if(i1!=i2){
soma = 0;
for(j=0; j<N; j++){
for(k=0; k<3; k++){
if( linmask [N*k+j] ){
soma+=(linxyz[k*N*S+j*S+i1] - linxyz[k*N*S+j*S+i2])*(linxyz[k*N*S+j*S+i1] - linxyz[k*N*S+j*S+i2]);
}
} //for k
} //for j
cost[i1]+=soma;
} //for if
} //for i2
} //for i1
//GPU
int *devN, *devS;
hipMalloc((void **)&devN, sizeof(int));
hipMalloc((void **)&devS, sizeof(int));
hipMemcpy(devN, &N, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devS, &S, sizeof(int), hipMemcpyHostToDevice);
int *dmask;
hipMalloc((void **)&dmask, sizeof(linmask));
hipMemcpy(dmask, linmask, sizeof(linmask), hipMemcpyHostToDevice);
double *d_xyz, *d_cost, cost2[S];
hipMalloc((void **)&d_xyz, sizeof(linxyz));
hipMalloc((void **)&d_cost, S*sizeof(double));
hipMemcpy(d_xyz, linxyz, sizeof(linxyz), hipMemcpyHostToDevice);
dim3 grids(S);
dim3 threads(S,N);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads),S*N*sizeof(double), 0, devS, devN, dmask, d_xyz, d_cost);
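// Illustrative note on the launch above: the third configuration argument, S*N*sizeof(double),
// sizes the dynamically allocated extern __shared__ double cache[] used by the kernel --
// one double per thread of the (S x N) block.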
hipMemcpy(cost2, d_cost, S*sizeof(double), hipMemcpyDeviceToHost);
printf("%f %f \n", cost2[0], cost2[1]);
hipFree(devN); hipFree(devS); hipFree(dmask);
hipFree(d_xyz); hipFree(d_cost);
/*__global__ void kernel
(int *Ss, int *Nn, int *mask, double *xyz, double *cost)*/
return 0;
}
| 1d00f47a38297df7dcd53570db4ed526c961968b.cu | #include <stdio.h>
#include <cuda.h>
extern __shared__ double cache[];
__global__ void kernel(int *Ss, int *Nn, int *mask, double *xyz,
double *cost){
int i1 = blockIdx.x;
int i2 = threadIdx.x;
int j = threadIdx.y;
int dimx= blockDim.x, dimy = blockDim.y;
int cacheIndex = j*dimx+i2, cIndexMax = dimx*dimy;
cache[cacheIndex] = 0.5;
__syncthreads();
//Sum all the cache indices here (done by a single thread to avoid a data race on cache[0] and cost[i1])
//separate offset
int i;
if (cacheIndex == 0) {
  for(i=1; i<cIndexMax; i++)
    cache[0]+=cache[i];
  cost[i1] += cache[0];
}
}
int main()
{
//Initializing
int N = 3, S = 10 ;
double xyz[3][N][S], linxyz[3*N*S],soma;
double cost[S];
int mask[3][N]={0}, linmask[3*N];
long int i1,i2;
long int j=0,k=0;
//mask
for(k=0; k<3; k++){
for(j=0; j<N; j++){
mask[k][j] = 1;
if(j%(k+1)==0)
mask[k][j] = 1;
}
}
for(k=0; k<3; k++){
for(j=0; j<N; j++){
linmask[j+N*k] = mask[k][j];
}
}
//mask
for(k=0; k<3; k++){
for(j=0; j<N;j++){
for(i1=0; i1<S; i1++){
xyz[k][j][i1] = 0.1*(i1+S*j + S*N*k);
linxyz[i1+S*j + S*N*k] = xyz[k][j][i1];
}
}
}
//CPU
soma = 0;
for (i1 = 0 ; i1 < S ; ++i1) {
for (i2 = 0; i2< S ; ++i2) {
if(i1!=i2){
soma = 0;
for(j=0; j<N; j++){
for(k=0; k<3; k++){
if( linmask [N*k+j] ){
soma+=(linxyz[k*N*S+j*S+i1] - linxyz[k*N*S+j*S+i2])*(linxyz[k*N*S+j*S+i1] - linxyz[k*N*S+j*S+i2]);
}
} //for k
} //for j
cost[i1]+=soma;
} //for if
} //for i2
} //for i1
//GPU
int *devN, *devS;
cudaMalloc((void **)&devN, sizeof(int));
cudaMalloc((void **)&devS, sizeof(int));
cudaMemcpy(devN, &N, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devS, &S, sizeof(int), cudaMemcpyHostToDevice);
int *dmask;
cudaMalloc((void **)&dmask, sizeof(linmask));
cudaMemcpy(dmask, linmask, sizeof(linmask), cudaMemcpyHostToDevice);
double *d_xyz, *d_cost, cost2[S];
cudaMalloc((void **)&d_xyz, sizeof(linxyz));
cudaMalloc((void **)&d_cost, S*sizeof(double));
cudaMemcpy(d_xyz, linxyz, sizeof(linxyz), cudaMemcpyHostToDevice);
dim3 grids(S);
dim3 threads(S,N);
kernel<<<grids,threads,S*N*sizeof(double)>>>(devS, devN, dmask, d_xyz, d_cost);
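// Illustrative note on the launch above: the third <<<...>>> argument, S*N*sizeof(double),
// sizes the dynamically allocated extern __shared__ double cache[] used by the kernel --
// one double per thread of the (S x N) block.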
cudaMemcpy(cost2, d_cost, S*sizeof(double), cudaMemcpyDeviceToHost);
printf("%f %f \n", cost2[0], cost2[1]);
cudaFree(devN); cudaFree(devS); cudaFree(dmask);
cudaFree(d_xyz); cudaFree(d_cost);
/*__global__ void kernel
(int *Ss, int *Nn, int *mask, double *xyz, double *cost)*/
return 0;
}
|
ee534e46a4c26bc4d9a674a9484107672e65b368.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "util.cuh"
using namespace std;
__global__ void hello_world(int *x){
printf("hello from gpu\n");
for(int i=0; i<10; i++)
printf("%i\t", x[i]);
printf("\n");
}
int main(){
hipDeviceReset();
int *dx, *dx2, *dx3;
int x[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int x2[10] = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10};
int x3[10] = {-11, -22, -3, -4, -5, -6, -7, -8, -9, -10};
hipMalloc((void **)&dx, sizeof(int)*10);
hipMemcpy(dx, x, sizeof(int)*10, hipMemcpyHostToDevice);
hipMalloc((void **)&dx2, sizeof(int)*10);
hipMemcpy(dx2, x2, sizeof(int)*10, hipMemcpyHostToDevice);
hipMalloc((void **)&dx3, sizeof(int)*10);
hipMemcpy(dx3, x3, sizeof(int)*10, hipMemcpyHostToDevice);
constexpr size_t numGPUArrays = 3;
size_t gpuArraySize = 10;
display<int, numGPUArrays>(gpuArraySize, dx, dx2, dx3);
return 0;
}
| ee534e46a4c26bc4d9a674a9484107672e65b368.cu | #include <stdio.h>
#include "util.cuh"
using namespace std;
__global__ void hello_world(int *x){
printf("hello from gpu\n");
for(int i=0; i<10; i++)
printf("%i\t", x[i]);
printf("\n");
}
int main(){
cudaDeviceReset();
int *dx, *dx2, *dx3;
int x[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
int x2[10] = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10};
int x3[10] = {-11, -22, -3, -4, -5, -6, -7, -8, -9, -10};
cudaMalloc((void **)&dx, sizeof(int)*10);
cudaMemcpy(dx, x, sizeof(int)*10, cudaMemcpyHostToDevice);
cudaMalloc((void **)&dx2, sizeof(int)*10);
cudaMemcpy(dx2, x2, sizeof(int)*10, cudaMemcpyHostToDevice);
cudaMalloc((void **)&dx3, sizeof(int)*10);
cudaMemcpy(dx3, x3, sizeof(int)*10, cudaMemcpyHostToDevice);
constexpr size_t numGPUArrays = 3;
size_t gpuArraySize = 10;
display<int, numGPUArrays>(gpuArraySize, dx, dx2, dx3);
return 0;
}
|
6009bd3fc28efc0578a8483cd2c54957845abc18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <hipcub/hipcub.hpp>
#include "../cuda.cuh"
const size_t blockSize = 32;
// Functions to convert index position to/from squareform to condensed form
__device__
int calc_row_idx(const int k, const int n) {
// __int2float_rz() casts int to float rounding toward zero; __fsqrt_rn() is the fast single-precision sqrt rounded to nearest
return n - 2 - floor(__fsqrt_rn(__int2float_rz(-8*k + 4*n*(n-1)-7))/2 - 0.5);
}
__device__
int calc_col_idx(const int k, const int i, const int n) {
return k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2;
}
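// Worked example (illustrative): for n = 5 the condensed form holds n*(n-1)/2 = 10 pairs,
// ordered (0,1),(0,2),(0,3),(0,4),(1,2),...,(3,4). For k = 7:
// calc_row_idx(7,5) = 5 - 2 - floor(sqrt(-56 + 80 - 7)/2 - 0.5) = 3 - floor(1.56) = 2
// calc_col_idx(7,2,5) = 7 + 2 + 1 - 10 + 3*2/2 = 3
// so condensed index 7 maps back to the square-form pair (i,j) = (2,3).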
// Kernel over all condensed pair indices of the square matrix: stores a flag for (sum == 2020) in one array and the pairwise product in another
__global__
void add_and_multiply(int* expenses_d, char* sums, int* prods, size_t length, size_t pairs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < pairs) {
int i, j;
i = calc_row_idx(index, length);
j = calc_col_idx(index, i, length);
*(sums + index) = (*(expenses_d + i) + *(expenses_d + j)) == 2020;
*(prods + index) = *(expenses_d + i) * *(expenses_d + j);
}
}
int main() {
std::string line;
std::ifstream infile("inputs/day1.data");
// Read input
std::vector<int> expenses;
if (infile.is_open()) {
while (std::getline(infile, line)) {
expenses.push_back(std::stoi(line));
}
infile.close();
}
// Copy input to device
int* expenses_d;
CUDA_CALL(hipMalloc((void** )&expenses_d, expenses.size() * sizeof(int)));
CUDA_CALL(hipMemcpy(expenses_d, expenses.data(), expenses.size() * sizeof(int),
hipMemcpyDefault));
// Allocate space to store output
size_t n_pairs = 0.5 * expenses.size() * (expenses.size() - 1);
char* sums;
CUDA_CALL(hipMalloc((void** )&sums, n_pairs * sizeof(char)));
CUDA_CALL(hipMemset(sums, 0, n_pairs * sizeof(char)));
int* prods;
CUDA_CALL(hipMalloc((void** )&prods, n_pairs * sizeof(int)));
CUDA_CALL(hipMemset(prods, 0, n_pairs * sizeof(int)));
// Calculate sums and products
size_t blockCount = (n_pairs + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add_and_multiply), dim3(blockCount), dim3(blockSize), 0, 0, expenses_d,
sums,
prods,
expenses.size(),
n_pairs);
// Use device select to get the answer
int *d_out;
CUDA_CALL(hipMalloc((void**)&d_out, n_pairs * sizeof(int)));
int *d_num_selected_out;
CUDA_CALL(hipMalloc((void**)&d_num_selected_out, sizeof(int)));
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// First call with d_temp_storage == nullptr only queries the required temp storage size (CUB's two-pass convention)
hipcub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
prods, sums, d_out,
d_num_selected_out, n_pairs);
CUDA_CALL(hipMalloc(&d_temp_storage, temp_storage_bytes));
// Run selection
hipcub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
prods, sums, d_out,
d_num_selected_out, n_pairs);
// Get and print answer
int n_selected;
CUDA_CALL(hipMemcpy(&n_selected, d_num_selected_out, sizeof(int),
hipMemcpyDefault));
std::vector<int> answer(n_selected);
CUDA_CALL(hipMemcpy(answer.data(), d_out, n_selected * sizeof(int),
hipMemcpyDefault));
for (auto it = answer.begin(); it != answer.end(); ++it) {
std::cout << *it << std::endl;
}
// Free device memory
CUDA_CALL(hipFree(expenses_d));
CUDA_CALL(hipFree(sums));
CUDA_CALL(hipFree(prods));
CUDA_CALL(hipFree(d_out));
CUDA_CALL(hipFree(d_num_selected_out));
CUDA_CALL(hipFree(d_temp_storage));
return 0;
} | 6009bd3fc28efc0578a8483cd2c54957845abc18.cu | #include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cub/device/device_select.cuh>
#include "../cuda.cuh"
const size_t blockSize = 32;
// Functions to convert index position to/from squareform to condensed form
__device__
int calc_row_idx(const int k, const int n) {
// __int2float_rz() casts int to float rounding toward zero; __fsqrt_rn() is the fast single-precision sqrt rounded to nearest
return n - 2 - floor(__fsqrt_rn(__int2float_rz(-8*k + 4*n*(n-1)-7))/2 - 0.5);
}
__device__
int calc_col_idx(const int k, const int i, const int n) {
return k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2;
}
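// Worked example (illustrative): for n = 5 the condensed form holds n*(n-1)/2 = 10 pairs,
// ordered (0,1),(0,2),(0,3),(0,4),(1,2),...,(3,4). For k = 7:
// calc_row_idx(7,5) = 5 - 2 - floor(sqrt(-56 + 80 - 7)/2 - 0.5) = 3 - floor(1.56) = 2
// calc_col_idx(7,2,5) = 7 + 2 + 1 - 10 + 3*2/2 = 3
// so condensed index 7 maps back to the square-form pair (i,j) = (2,3).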
// Kernel over all condensed pair indices of the square matrix: stores a flag for (sum == 2020) in one array and the pairwise product in another
__global__
void add_and_multiply(int* expenses_d, char* sums, int* prods, size_t length, size_t pairs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < pairs) {
int i, j;
i = calc_row_idx(index, length);
j = calc_col_idx(index, i, length);
*(sums + index) = (*(expenses_d + i) + *(expenses_d + j)) == 2020;
*(prods + index) = *(expenses_d + i) * *(expenses_d + j);
}
}
int main() {
std::string line;
std::ifstream infile("inputs/day1.data");
// Read input
std::vector<int> expenses;
if (infile.is_open()) {
while (std::getline(infile, line)) {
expenses.push_back(std::stoi(line));
}
infile.close();
}
// Copy input to device
int* expenses_d;
CUDA_CALL(cudaMalloc((void** )&expenses_d, expenses.size() * sizeof(int)));
CUDA_CALL(cudaMemcpy(expenses_d, expenses.data(), expenses.size() * sizeof(int),
cudaMemcpyDefault));
// Allocate space to store output
size_t n_pairs = 0.5 * expenses.size() * (expenses.size() - 1);
char* sums;
CUDA_CALL(cudaMalloc((void** )&sums, n_pairs * sizeof(char)));
CUDA_CALL(cudaMemset(sums, 0, n_pairs * sizeof(char)));
int* prods;
CUDA_CALL(cudaMalloc((void** )&prods, n_pairs * sizeof(int)));
CUDA_CALL(cudaMemset(prods, 0, n_pairs * sizeof(int)));
// Calculate sums and products
size_t blockCount = (n_pairs + blockSize - 1) / blockSize;
add_and_multiply<<<blockCount, blockSize>>>(expenses_d,
sums,
prods,
expenses.size(),
n_pairs);
// Use device select to get the answer
int *d_out;
CUDA_CALL(cudaMalloc((void**)&d_out, n_pairs * sizeof(int)));
int *d_num_selected_out;
CUDA_CALL(cudaMalloc((void**)&d_num_selected_out, sizeof(int)));
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// First call with d_temp_storage == nullptr only queries the required temp storage size (CUB's two-pass convention)
cub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
prods, sums, d_out,
d_num_selected_out, n_pairs);
CUDA_CALL(cudaMalloc(&d_temp_storage, temp_storage_bytes));
// Run selection
cub::DeviceSelect::Flagged(d_temp_storage,
temp_storage_bytes,
prods, sums, d_out,
d_num_selected_out, n_pairs);
// Get and print answer
int n_selected;
CUDA_CALL(cudaMemcpy(&n_selected, d_num_selected_out, sizeof(int),
cudaMemcpyDefault));
std::vector<int> answer(n_selected);
CUDA_CALL(cudaMemcpy(answer.data(), d_out, n_selected * sizeof(int),
cudaMemcpyDefault));
for (auto it = answer.begin(); it != answer.end(); ++it) {
std::cout << *it << std::endl;
}
// Free device memory
CUDA_CALL(cudaFree(expenses_d));
CUDA_CALL(cudaFree(sums));
CUDA_CALL(cudaFree(prods));
CUDA_CALL(cudaFree(d_out));
CUDA_CALL(cudaFree(d_num_selected_out));
CUDA_CALL(cudaFree(d_temp_storage));
return 0;
} |
1fe2412b419c9e442917ad4c837112e1b050cc81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 3 . 0
! ---------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! CNRS, France
! and Princeton University, USA
! (there are currently many more authors!)
! (c) October 2017
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 3 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include "mesh_constants_cuda.h"
/* ----------------------------------------------------------------------------------------------- */
// Helper functions
/* ----------------------------------------------------------------------------------------------- */
double get_time()
{
struct timeval t;
struct timezone tzp;
gettimeofday(&t, &tzp);
return t.tv_sec + t.tv_usec*1e-6;
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(pause_for_debug,PAUSE_FOR_DEBUG)() {
TRACE("pause_for_debug");
pause_for_debugger(1);
}
/* ----------------------------------------------------------------------------------------------- */
void pause_for_debugger(int pause) {
if (pause) {
int myrank;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
printf("I'm rank %d\n",myrank);
int i = 0;
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("PID %d on %s:%d ready for attach\n", getpid(), hostname,myrank);
FILE *file = fopen("./attach_gdb.txt","w+");
if (file != NULL){
fprintf(file,"PID %d on %s:%d ready for attach\n", getpid(), hostname,myrank);
fclose(file);
}
fflush(stdout);
while (0 == i)
sleep(5);
}
}
/* ----------------------------------------------------------------------------------------------- */
void exit_on_cuda_error(const char* kernel_name) {
// sync and check to catch errors from previous async operations
synchronize_cuda();
hipError_t err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr,"Error after %s: %s\n", kernel_name, hipGetErrorString(err));
//debugging
//pause_for_debugger(0);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"Error after %s: %s\n", kernel_name, hipGetErrorString(err));
fclose(fp);
}
// stops program
//free(kernel_name);
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
exit(EXIT_FAILURE);
}
}
/* ----------------------------------------------------------------------------------------------- */
void exit_on_error(const char* info) {
printf("\nERROR: %s\n",info);
fflush(stdout);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"ERROR: %s\n",info);
fclose(fp);
}
// stops program
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
//free(info);
exit(EXIT_FAILURE);
return;
}
/* ----------------------------------------------------------------------------------------------- */
void print_CUDA_error_if_any(hipError_t err, int num) {
if (hipSuccess != err)
{
printf("\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",hipGetErrorString(err),num);
fflush(stdout);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",hipGetErrorString(err),num);
fclose(fp);
}
// stops program
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
exit(EXIT_FAILURE);
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// CUDA synchronization
/* ----------------------------------------------------------------------------------------------- */
void synchronize_cuda(){
#if TORCH_HIP_VERSION >= 4000
hipDeviceSynchronize();
#else
hipDeviceSynchronize();
#endif
}
/* ----------------------------------------------------------------------------------------------- */
void synchronize_mpi(){
#ifdef WITH_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// Timing helper functions
/* ----------------------------------------------------------------------------------------------- */
void start_timing_cuda(hipEvent_t* start,hipEvent_t* stop){
// creates & starts event
hipEventCreate(start);
hipEventCreate(stop);
hipEventRecord( *start, 0);
}
/* ----------------------------------------------------------------------------------------------- */
void stop_timing_cuda(hipEvent_t* start,hipEvent_t* stop, const char* info_str){
realw time;
// stops events
hipEventRecord( *stop, 0);
hipEventSynchronize( *stop );
hipEventElapsedTime( &time, *start, *stop );
hipEventDestroy( *start );
hipEventDestroy( *stop );
// user output
printf("%s: Execution Time = %f ms\n",info_str,time);
}
/* ----------------------------------------------------------------------------------------------- */
void stop_timing_cuda(hipEvent_t* start,hipEvent_t* stop, const char* info_str,realw* t){
realw time;
// stops events
hipEventRecord( *stop, 0);
hipEventSynchronize( *stop );
hipEventElapsedTime( &time, *start, *stop );
hipEventDestroy( *start );
hipEventDestroy( *stop );
// user output
printf("%s: Execution Time = %f ms\n",info_str,time);
// returns time
*t = time;
}
/* ----------------------------------------------------------------------------------------------- */
// CUDA kernel setup functions
/* ----------------------------------------------------------------------------------------------- */
void get_blocks_xy(int num_blocks,int* num_blocks_x,int* num_blocks_y) {
// Initially sets the blocks_x to be the num_blocks, and adds rows as needed (grid x-dimension limit of MAXIMUM_GRID_DIM, typically 65535 blocks).
// If an additional row is added, the row length is cut in
// half. If the block count is odd, there will be 1 too many blocks,
// which must be managed at runtime with an if statement.
*num_blocks_x = num_blocks;
*num_blocks_y = 1;
while (*num_blocks_x > MAXIMUM_GRID_DIM) {
*num_blocks_x = (int) ceil(*num_blocks_x * 0.5f);
*num_blocks_y = *num_blocks_y * 2;
}
return;
}
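// Worked example (illustrative, assuming MAXIMUM_GRID_DIM is 65535): for num_blocks = 150000
// the loop runs twice: x = 75000, y = 2, then x = 37500, y = 4, giving a 37500 x 4 grid that
// covers exactly 150000 blocks; kernels index it as bx = blockIdx.y*gridDim.x + blockIdx.x and
// rely on their own bounds check to handle the single extra block that appears for odd counts.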
/* ----------------------------------------------------------------------------------------------- */
// GPU device memory functions
/* ----------------------------------------------------------------------------------------------- */
void get_free_memory(double* free_db, double* used_db, double* total_db) {
TRACE("get_free_memory");
// gets memory usage in byte
size_t free_byte ;
size_t total_byte ;
hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ;
if (hipSuccess != cuda_status){
printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) );
exit(EXIT_FAILURE);
}
*free_db = (double)free_byte ;
*total_db = (double)total_byte ;
*used_db = *total_db - *free_db ;
return;
}
/* ----------------------------------------------------------------------------------------------- */
// Saves GPU memory usage to file
void output_free_memory(int myrank,char* info_str) {
TRACE("output_free_memory");
FILE* fp;
char filename[BUFSIZ];
double free_db,used_db,total_db;
int do_output_info;
// by default, only master process outputs device infos to avoid file cluttering
do_output_info = 0;
if (myrank == 0){
do_output_info = 1;
sprintf(filename,OUTPUT_FILES"/gpu_device_mem_usage.txt");
}
// debugging
if (DEBUG){
do_output_info = 1;
sprintf(filename,OUTPUT_FILES"/gpu_device_mem_usage_proc_%06d.txt",myrank);
}
// outputs to file
if (do_output_info){
// gets memory usage
get_free_memory(&free_db,&used_db,&total_db);
// file output
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"%d: @%s GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n", myrank, info_str,
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
fclose(fp);
}
}
}
/* ----------------------------------------------------------------------------------------------- */
// Fortran-callable version of above method
extern "C"
void FC_FUNC_(output_free_device_memory,
OUTPUT_FREE_DEVICE_MEMORY)(int* myrank_f) {
TRACE("output_free_device_memory");
char info[64];
int myrank = *myrank_f;
sprintf(info,"f %d:",myrank);
output_free_memory(myrank,info);
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_free_device_memory,
get_FREE_DEVICE_MEMORY)(realw* free, realw* used, realw* total) {
TRACE("get_free_device_memory");
double free_db,used_db,total_db;
get_free_memory(&free_db,&used_db,&total_db);
// converts to MB
*free = (realw) free_db/1024.0/1024.0;
*used = (realw) used_db/1024.0/1024.0;
*total = (realw) total_db/1024.0/1024.0;
return;
}
/* ----------------------------------------------------------------------------------------------- */
// Auxiliary functions
/* ----------------------------------------------------------------------------------------------- */
/*
__global__ void memset_to_realw_kernel(realw* array, int size, realw value){
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
if (i < size){
array[i] = *value;
}
}
*/
/* ----------------------------------------------------------------------------------------------- */
realw get_device_array_maximum_value(realw* array, int size){
// get maximum of array on GPU by copying over to CPU and handle it there
realw max = 0.0f;
// checks if anything to do
if (size > 0){
realw* h_array;
// explicitly wait for cuda kernels to finish
// (hipMemcpy implicitly synchronizes all other cuda operations)
synchronize_cuda();
h_array = (realw*)calloc(size,sizeof(realw));
print_CUDA_error_if_any(hipMemcpy(h_array,array,sizeof(realw)*size,hipMemcpyDeviceToHost),33001);
// finds maximum value in array
max = h_array[0];
for( int i=1; i < size; i++){
if (abs(h_array[i]) > max) max = abs(h_array[i]);
}
free(h_array);
}
return max;
}
/* ----------------------------------------------------------------------------------------------- */
// ACOUSTIC simulations
/* ----------------------------------------------------------------------------------------------- */
__global__ void get_maximum_kernel(field* array, int size, realw* d_max){
/* simplest version: uses only 1 thread
realw max;
max = 0;
// finds maximum value in array
if (size > 0){
max = abs(array[0]);
for( int i=1; i < size; i++){
if (abs(array[i]) > max) max = abs(array[i]);
}
}
*d_max = max;
*/
// reduction example:
__shared__ realw sdata[BLOCKSIZE_TRANSFER] ;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
// loads absolute values into shared memory
sdata[tid] = (i < size) ? fabs(array[i]) : 0.0 ;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s){
// summation:
//sdata[tid] += sdata[tid + s];
// maximum:
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_max[bx] = sdata[0];
}
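// Illustrative trace of the tree reduction above: with blockDim.x = 8 and absolute values
// {3,7,2,9,1,5,8,4} loaded into sdata, the strides s = 4, 2, 1 give
// {3,7,8,9,...} -> {8,9,...} -> sdata[0] = 9, i.e. the block maximum in log2(blockDim.x) steps.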
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_norm_acoustic_from_device,
GET_NORM_ACOUSTIC_FROM_DEVICE)(realw* norm,long* Mesh_pointer,int* sim_type) {
TRACE("get_norm_acoustic_from_device");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw max = 0.0;
realw *d_max;
//initializes
*norm = 0.0f;
/* way 1 : timing Elapsed time: 8.464813e-03
realw* h_array;
h_array = (realw*)calloc(mp->NGLOB_AB,sizeof(realw));
print_CUDA_error_if_any(hipMemcpy(h_array,mp->d_potential_dot_dot_acoustic,
sizeof(realw)*(mp->NGLOB_AB),hipMemcpyDeviceToHost),131);
// finds maximum value in array
max = h_array[0];
for( int i=1; i < mp->NGLOB_AB; i++){
if (abs(h_array[i]) > max) max = abs(h_array[i]);
}
free(h_array);
*/
/* way 2: timing Elapsed time: 8.818102e-02
// launch simple kernel
hipMalloc((void**)&d_max,sizeof(realw));
dim3 grid(1,1);
dim3 threads(1,1,1);
get_maximum_kernel<<<grid,threads>>>(mp->d_potential_dot_dot_acoustic,
mp->NGLOB_AB,
d_max);
print_CUDA_error_if_any(hipMemcpy(&max,d_max, sizeof(realw), hipMemcpyDeviceToHost),222);
hipFree(d_max);
*/
// way 2 b: timing Elapsed time: 1.236916e-03
// launch simple reduction kernel
realw* h_max;
int blocksize = BLOCKSIZE_TRANSFER;
int size = mp->NGLOB_AB;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
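// Illustrative sizing example (BLOCKSIZE_TRANSFER value assumed here, e.g. 256): with NGLOB_AB = 1000,
// size_padded = ceil(1000/256)*256 = 1024, so 4 blocks of 256 threads are launched and the kernel's
// (i < size) guard masks out the 24 padding threads.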
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//printf("num_blocks_x %i \n",num_blocks_x);
// on host (allocates & initializes to zero)
h_max = (realw*) calloc(num_blocks_x*num_blocks_y,sizeof(realw));
// allocates memory on device
print_CUDA_error_if_any(hipMalloc((void**)&d_max,num_blocks_x*num_blocks_y*sizeof(realw)),78001);
// initializes values to zero
print_CUDA_error_if_any(hipMemset(d_max,0,num_blocks_x*num_blocks_y*sizeof(realw)),77002);
if (*sim_type == 1){
hipLaunchKernelGGL(( get_maximum_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_potential_dot_dot_acoustic,size,d_max);
}else if (*sim_type == 3){
hipLaunchKernelGGL(( get_maximum_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_potential_dot_dot_acoustic,size,d_max);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("kernel get_maximum_kernel");
#endif
// synchronizes
//synchronize_cuda();
// explicitly waits for stream to finish
// (hipMemcpy implicitly synchronizes all other cuda operations)
hipStreamSynchronize(mp->compute_stream);
print_CUDA_error_if_any(hipMemcpy(h_max,d_max,num_blocks_x*num_blocks_y*sizeof(realw),
hipMemcpyDeviceToHost),222);
// determines max for all blocks
max = h_max[0];
for(int i=1;i<num_blocks_x*num_blocks_y;i++) {
if (max < h_max[i]) max = h_max[i];
}
hipFree(d_max);
free(h_max);
/* way 3: doesn't work properly...
cublasStatus status;
// Initialize CUBLAS
status = hipblasInit();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit(1);
}
// cublas function: hipblasIsamax
// finds the smallest index of the maximum magnitude element of single
// precision vector x
int incr = 1;
int imax = 0;
imax = hipblasIsamax(mp->NGLOB_AB,(realw*)mp->d_potential_dot_dot_acoustic, incr);
status= hipblasGetError();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS error in hipblasIsamax\n");
exit(1);
}
print_CUDA_error_if_any(hipMemcpy(&max,&(mp->d_potential_dot_dot_acoustic[imax]),
sizeof(realw), hipMemcpyDeviceToHost),222);
printf("maximum %i %i %f \n",mp->NGLOB_AB,imax,max);
// Shutdown
status = hipblasShutdown();
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error (A)\n");
exit(1);
}
*/
// return result
*norm = max;
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("get_norm_acoustic_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// ELASTIC simulations
/* ----------------------------------------------------------------------------------------------- */
__global__ void get_maximum_vector_kernel(realw* array, int size, realw* d_max){
// reduction example:
__shared__ realw sdata[BLOCKSIZE_TRANSFER] ;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
// loads values into shared memory: assume array is a vector array
sdata[tid] = (i < size) ? (array[i*3]*array[i*3] + array[i*3+1]*array[i*3+1] + array[i*3+2]*array[i*3+2]) : 0.0 ;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s){
// summation:
//sdata[tid] += sdata[tid + s];
// maximum:
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_max[bx] = sdata[0];
}
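// Illustrative note: each block stores the maximum *squared* vector magnitude in d_max;
// get_norm_elastic_from_device() below reduces over the blocks and takes sqrt() once on the host
// to obtain the norm.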
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_norm_elastic_from_device,
GET_NORM_ELASTIC_FROM_DEVICE)(realw* norm,
long* Mesh_pointer,
int* type) {
TRACE("\tget_norm_elastic_from_device");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw max,res;
realw *d_max;
//initializes
*norm = 0.0f;
// launch simple reduction kernel
realw* h_max;
int blocksize = BLOCKSIZE_TRANSFER;
int size = mp->NGLOB_AB;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// on host (allocates & initializes to zero)
h_max = (realw*) calloc(num_blocks_x*num_blocks_y,sizeof(realw));
// allocates memory on device
print_CUDA_error_if_any(hipMalloc((void**)&d_max,num_blocks_x*num_blocks_y*sizeof(realw)),77001);
// initializes values to zero
print_CUDA_error_if_any(hipMemset(d_max,0,num_blocks_x*num_blocks_y*sizeof(realw)),77002);
if (*type == 1){
hipLaunchKernelGGL(( get_maximum_vector_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_displ,size,d_max);
}else if (*type == 3){
hipLaunchKernelGGL(( get_maximum_vector_kernel), dim3(grid),dim3(threads),0,mp->compute_stream, mp->d_b_displ,size,d_max);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("kernel get_norm_elastic_from_device");
#endif
// synchronizes
//synchronize_cuda();
// explicitly waits for stream to finish
// (hipMemcpy implicitly synchronizes all other cuda operations)
hipStreamSynchronize(mp->compute_stream);
// copies reduction array back to CPU
print_CUDA_error_if_any(hipMemcpy(h_max,d_max,num_blocks_x*num_blocks_y*sizeof(realw),
hipMemcpyDeviceToHost),222);
// determines max for all blocks
max = h_max[0];
for(int i=1;i<num_blocks_x*num_blocks_y;i++) {
if (max < h_max[i]) max = h_max[i];
}
res = sqrt(max);
// return result
*norm = res;
// debug
//printf("rank % d - type: %d norm: %f \n",mp->myrank,*type,res);
hipFree(d_max);
free(h_max);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("get_norm_elastic_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// unused ...
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(get_max_accel,
GET_MAX_ACCEL)(int* itf,int* sizef,long* Mesh_pointer) {
TRACE("get_max_accel");
Mesh* mp = (Mesh*)(*Mesh_pointer);
int procid;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD,&procid);
#else
procid = 0;
#endif
int size = *sizef;
int it = *itf;
realw* accel_cpy = (realw*)malloc(size*sizeof(realw));
hipMemcpy(accel_cpy,mp->d_accel,size*sizeof(realw),hipMemcpyDeviceToHost);
realw maxval=0;
for(int i=0;i<size;++i) {
maxval = MAX(maxval,accel_cpy[i]);
}
printf("%d/%d: max=%e\n",it,procid,maxval);
free(accel_cpy);
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_phase_ispec_kernel(int num_phase_ispec,
int* phase_ispec,
int NSPEC_AB,
int* ier) {
int i,ispec,iphase,count0,count1;
*ier = 0;
for(iphase=0; iphase < 2; iphase++){
count0 = 0;
count1 = 0;
for(i=0; i < num_phase_ispec; i++){
ispec = phase_ispec[iphase*num_phase_ispec + i] - 1;
if (ispec < -1 || ispec >= NSPEC_AB){
printf("Error in d_phase_ispec_inner_elastic %d %d\n",i,ispec);
*ier = 1;
return;
}
if (ispec >= 0){ count0++;}
if (ispec < 0){ count1++;}
}
printf("check_phase_ispec done: phase %d, count = %d %d \n",iphase,count0,count1);
}
}
void check_phase_ispec(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check phase_ispec for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
hipMalloc((void**)&d_debug,sizeof(int));
if (type == 1){
check_phase_ispec_kernel<<<grid,threads>>>(mp->num_phase_ispec_elastic,
mp->d_phase_ispec_inner_elastic,
mp->NSPEC_AB,
d_debug);
}else if (type == 2){
check_phase_ispec_kernel<<<grid,threads>>>(mp->num_phase_ispec_acoustic,
mp->d_phase_ispec_inner_acoustic,
mp->NSPEC_AB,
d_debug);
}
hipMemcpy(h_debug,d_debug,1*sizeof(int),hipMemcpyDeviceToHost);
hipFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_phase_ispec");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_ispec_is_kernel(int NSPEC_AB,
int* ispec_is,
int* ier) {
int ispec,count0,count1;
*ier = 0;
count0 = 0;
count1 = 0;
for(ispec=0; ispec < NSPEC_AB; ispec++){
if (ispec_is[ispec] < -1 || ispec_is[ispec] > 1){
printf("Error in ispec_is %d %d\n",ispec,ispec_is[ispec]);
*ier = 1;
return;
//exit(1);
}
if (ispec_is[ispec] == 0){count0++;}
if (ispec_is[ispec] != 0){count1++;}
}
printf("check_ispec_is done: count = %d %d\n",count0,count1);
}
void check_ispec_is(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check ispec_is for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
hipMalloc((void**)&d_debug,sizeof(int));
if (type == 0){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_inner,
d_debug);
}else if (type == 1){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_elastic,
d_debug);
}else if (type == 2){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_acoustic,
d_debug);
}
hipMemcpy(h_debug,d_debug,1*sizeof(int),hipMemcpyDeviceToHost);
hipFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_ispec_is");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_array_ispec_kernel(int num_array_ispec,
int* array_ispec,
int NSPEC_AB,
int* ier) {
int i,ispec,count0,count1;
*ier = 0;
count0 = 0;
count1 = 0;
for(i=0; i < num_array_ispec; i++){
ispec = array_ispec[i] - 1;
if (ispec < -1 || ispec >= NSPEC_AB){
printf("Error in d_array_ispec %d %d\n",i,ispec);
*ier = 1;
return;
}
if (ispec >= 0){ count0++;}
if (ispec < 0){ count1++;}
}
printf("check_array_ispec done: count = %d %d \n",count0,count1);
}
void check_array_ispec(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check array_ispec for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
hipMalloc((void**)&d_debug,sizeof(int));
if (type == 1){
check_array_ispec_kernel<<<grid,threads>>>(mp->d_num_abs_boundary_faces,
mp->d_abs_boundary_ispec,
mp->NSPEC_AB,
d_debug);
}
hipMemcpy(h_debug,d_debug,1*sizeof(int),hipMemcpyDeviceToHost);
hipFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_array_ispec");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
// Check functions
/* ----------------------------------------------------------------------------------------------- */
//max: helper functions
/*
extern "C"
void FC_FUNC_(check_max_norm_displ_gpu,
CHECK_MAX_NORM_DISPL_GPU)(int* size, realw* displ,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_displ_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
hipMemcpy(displ, mp->d_displ,*size*sizeof(realw),hipMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(displ[i]));
}
printf("%d: maxnorm of forward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_vector,
CHECK_MAX_NORM_VECTOR)(int* size, realw* vector1, int* announceID) {
TRACE("check_max_norm_vector");
int procid;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD,&procid);
#else
procid = 0;
#endif
realw maxnorm=0;
int maxloc;
for(int i=0;i<*size;i++) {
if (maxnorm<fabsf(vector1[i])) {
maxnorm = vector1[i];
maxloc = i;
}
}
printf("%d:maxnorm of vector %d [%d] = %e\n",procid,*announceID,maxloc,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_displ,
CHECK_MAX_NORM_DISPL)(int* size, realw* displ, int* announceID) {
TRACE("check_max_norm_displ");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(displ[i]));
}
printf("%d: maxnorm of forward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_displ_gpu,
CHECK_MAX_NORM_B_DISPL_GPU)(int* size, realw* b_displ,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_displ_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw* b_accel = (realw*)malloc(*size*sizeof(realw));
hipMemcpy(b_displ, mp->d_b_displ,*size*sizeof(realw),hipMemcpyDeviceToHost);
hipMemcpy(b_accel, mp->d_b_accel,*size*sizeof(realw),hipMemcpyDeviceToHost);
realw maxnorm=0;
realw maxnorm_accel=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_displ[i]));
maxnorm_accel = MAX(maxnorm,fabsf(b_accel[i]));
}
free(b_accel);
printf("%d: maxnorm of backward displ = %e\n",*announceID,maxnorm);
printf("%d: maxnorm of backward accel = %e\n",*announceID,maxnorm_accel);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_accel_gpu,
CHECK_MAX_NORM_B_ACCEL_GPU)(int* size, realw* b_accel,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_accel_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
hipMemcpy(b_accel, mp->d_b_accel,*size*sizeof(realw),hipMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_accel[i]));
}
printf("%d: maxnorm of backward accel = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_veloc_gpu,
CHECK_MAX_NORM_B_VELOC_GPU)(int* size, realw* b_veloc,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_veloc_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
hipMemcpy(b_veloc, mp->d_b_veloc,*size*sizeof(realw),hipMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_veloc[i]));
}
printf("%d: maxnorm of backward veloc = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_displ,
CHECK_MAX_NORM_B_DISPL)(int* size, realw* b_displ,int* announceID) {
TRACE("check_max_norm_b_displ");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_displ[i]));
}
printf("%d:maxnorm of backward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_accel,
CHECK_MAX_NORM_B_ACCEL)(int* size, realw* b_accel,int* announceID) {
TRACE("check_max_norm_b_accel");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_accel[i]));
}
printf("%d:maxnorm of backward accel = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_error_vectors,
CHECK_ERROR_VECTORS)(int* sizef, realw* vector1,realw* vector2) {
TRACE("check_error_vectors");
int size = *sizef;
double diff2 = 0;
double sum = 0;
double temp;
double maxerr=0;
int maxerrorloc;
for(int i=0;i<size;++i) {
temp = vector1[i]-vector2[i];
diff2 += temp*temp;
sum += vector1[i]*vector1[i];
if (maxerr < fabsf(temp)) {
maxerr = abs(temp);
maxerrorloc = i;
}
}
printf("rel error = %f, maxerr = %e @ %d\n",diff2/sum,maxerr,maxerrorloc);
int myrank;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
if (myrank == 0) {
for(int i=maxerrorloc;i>maxerrorloc-5;i--) {
printf("[%d]: %e vs. %e\n",i,vector1[i],vector2[i]);
}
}
}
*/
| 1fe2412b419c9e442917ad4c837112e1b050cc81.cu | /*
!=====================================================================
!
! S p e c f e m 3 D V e r s i o n 3 . 0
! ---------------------------------------
!
! Main historical authors: Dimitri Komatitsch and Jeroen Tromp
! CNRS, France
! and Princeton University, USA
! (there are currently many more authors!)
! (c) October 2017
!
! This program is free software; you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation; either version 3 of the License, or
! (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License along
! with this program; if not, write to the Free Software Foundation, Inc.,
! 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
!
!=====================================================================
*/
#include "mesh_constants_cuda.h"
/* ----------------------------------------------------------------------------------------------- */
// Helper functions
/* ----------------------------------------------------------------------------------------------- */
double get_time()
{
struct timeval t;
struct timezone tzp;
gettimeofday(&t, &tzp);
return t.tv_sec + t.tv_usec*1e-6;
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(pause_for_debug,PAUSE_FOR_DEBUG)() {
TRACE("pause_for_debug");
pause_for_debugger(1);
}
/* ----------------------------------------------------------------------------------------------- */
void pause_for_debugger(int pause) {
if (pause) {
int myrank;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
printf("I'm rank %d\n",myrank);
int i = 0;
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("PID %d on %s:%d ready for attach\n", getpid(), hostname,myrank);
FILE *file = fopen("./attach_gdb.txt","w+");
if (file != NULL){
fprintf(file,"PID %d on %s:%d ready for attach\n", getpid(), hostname,myrank);
fclose(file);
}
fflush(stdout);
while (0 == i)
sleep(5);
}
}
/* ----------------------------------------------------------------------------------------------- */
void exit_on_cuda_error(const char* kernel_name) {
// sync and check to catch errors from previous async operations
synchronize_cuda();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr,"Error after %s: %s\n", kernel_name, cudaGetErrorString(err));
//debugging
//pause_for_debugger(0);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"Error after %s: %s\n", kernel_name, cudaGetErrorString(err));
fclose(fp);
}
// stops program
//free(kernel_name);
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
exit(EXIT_FAILURE);
}
}
/* ----------------------------------------------------------------------------------------------- */
void exit_on_error(const char* info) {
printf("\nERROR: %s\n",info);
fflush(stdout);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"ERROR: %s\n",info);
fclose(fp);
}
// stops program
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
//free(info);
exit(EXIT_FAILURE);
return;
}
/* ----------------------------------------------------------------------------------------------- */
void print_CUDA_error_if_any(cudaError_t err, int num) {
if (cudaSuccess != err)
{
printf("\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",cudaGetErrorString(err),num);
fflush(stdout);
// outputs error file
FILE* fp;
int myrank;
char filename[BUFSIZ];
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
sprintf(filename,OUTPUT_FILES"/error_message_%06d.txt",myrank);
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",cudaGetErrorString(err),num);
fclose(fp);
}
// stops program
#ifdef WITH_MPI
MPI_Abort(MPI_COMM_WORLD,1);
#endif
exit(EXIT_FAILURE);
}
return;
}
/* ----------------------------------------------------------------------------------------------- */
// CUDA synchronization
/* ----------------------------------------------------------------------------------------------- */
void synchronize_cuda(){
#if CUDA_VERSION >= 4000
cudaDeviceSynchronize();
#else
cudaThreadSynchronize();
#endif
}
/* ----------------------------------------------------------------------------------------------- */
void synchronize_mpi(){
#ifdef WITH_MPI
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// Timing helper functions
/* ----------------------------------------------------------------------------------------------- */
void start_timing_cuda(cudaEvent_t* start,cudaEvent_t* stop){
// creates & starts event
cudaEventCreate(start);
cudaEventCreate(stop);
cudaEventRecord( *start, 0);
}
/* ----------------------------------------------------------------------------------------------- */
void stop_timing_cuda(cudaEvent_t* start,cudaEvent_t* stop, const char* info_str){
realw time;
// stops events
cudaEventRecord( *stop, 0);
cudaEventSynchronize( *stop );
cudaEventElapsedTime( &time, *start, *stop );
cudaEventDestroy( *start );
cudaEventDestroy( *stop );
// user output
printf("%s: Execution Time = %f ms\n",info_str,time);
}
/* ----------------------------------------------------------------------------------------------- */
void stop_timing_cuda(cudaEvent_t* start,cudaEvent_t* stop, const char* info_str,realw* t){
realw time;
// stops events
cudaEventRecord( *stop, 0);
cudaEventSynchronize( *stop );
cudaEventElapsedTime( &time, *start, *stop );
cudaEventDestroy( *start );
cudaEventDestroy( *stop );
// user output
printf("%s: Execution Time = %f ms\n",info_str,time);
// returns time
*t = time;
}
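/* ----------------------------------------------------------------------------------------------- */
// Illustrative sketch (added for clarity, not part of the original source): how the timing helpers
// above are intended to be paired around a kernel launch. my_kernel, grid and threads are
// placeholder names, not symbols defined in this file.
/*
cudaEvent_t start, stop;
realw elapsed_ms;
start_timing_cuda(&start,&stop);
// launch the work to be timed, e.g. my_kernel<<<grid,threads>>>(...);
stop_timing_cuda(&start,&stop,"my_kernel",&elapsed_ms);
// elapsed_ms now holds the elapsed GPU time in milliseconds
*/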
/* ----------------------------------------------------------------------------------------------- */
// CUDA kernel setup functions
/* ----------------------------------------------------------------------------------------------- */
void get_blocks_xy(int num_blocks,int* num_blocks_x,int* num_blocks_y) {
// Initially sets num_blocks_x to num_blocks and adds grid rows as needed
// (the grid x-dimension is limited to MAXIMUM_GRID_DIM, typically 65535).
// Each time a row is added, the row length is cut in half; if the block
// count is odd, this padding leaves one extra block, which kernels must
// guard against at runtime with an if statement on the element index.
*num_blocks_x = num_blocks;
*num_blocks_y = 1;
while (*num_blocks_x > MAXIMUM_GRID_DIM) {
*num_blocks_x = (int) ceil(*num_blocks_x * 0.5f);
*num_blocks_y = *num_blocks_y * 2;
}
return;
}
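/* ----------------------------------------------------------------------------------------------- */
// Illustrative sketch (added for clarity, not part of the original source): a kernel consuming the
// 2D grid produced by get_blocks_xy() recovers its linear block index the same way the reduction
// kernels below do, and must guard against the extra padding block that an odd block count can add.
/*
__global__ void example_grid_xy_kernel(realw* array, int size) {
  unsigned int bx = blockIdx.y*gridDim.x + blockIdx.x;  // linear block index in the 2D grid
  unsigned int i  = threadIdx.x + bx*blockDim.x;        // global element index
  if (i < size) {                                       // guard against padded blocks
    array[i] = 0.0f;
  }
}
*/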
/* ----------------------------------------------------------------------------------------------- */
// GPU device memory functions
/* ----------------------------------------------------------------------------------------------- */
void get_free_memory(double* free_db, double* used_db, double* total_db) {
TRACE("get_free_memory");
// gets memory usage in byte
size_t free_byte ;
size_t total_byte ;
cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
if (cudaSuccess != cuda_status){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(EXIT_FAILURE);
}
*free_db = (double)free_byte ;
*total_db = (double)total_byte ;
*used_db = *total_db - *free_db ;
return;
}
/* ----------------------------------------------------------------------------------------------- */
// Saves GPU memory usage to file
void output_free_memory(int myrank,char* info_str) {
TRACE("output_free_memory");
FILE* fp;
char filename[BUFSIZ];
double free_db,used_db,total_db;
int do_output_info;
// by default, only the master process outputs device info to avoid cluttering files
do_output_info = 0;
if (myrank == 0){
do_output_info = 1;
sprintf(filename,OUTPUT_FILES"/gpu_device_mem_usage.txt");
}
// debugging
if (DEBUG){
do_output_info = 1;
sprintf(filename,OUTPUT_FILES"/gpu_device_mem_usage_proc_%06d.txt",myrank);
}
// outputs to file
if (do_output_info){
// gets memory usage
get_free_memory(&free_db,&used_db,&total_db);
// file output
fp = fopen(filename,"a+");
if (fp != NULL){
fprintf(fp,"%d: @%s GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n", myrank, info_str,
used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
fclose(fp);
}
}
}
/* ----------------------------------------------------------------------------------------------- */
// Fortran-callable version of above method
extern "C"
void FC_FUNC_(output_free_device_memory,
OUTPUT_FREE_DEVICE_MEMORY)(int* myrank_f) {
TRACE("output_free_device_memory");
char info[64];
int myrank = *myrank_f;
sprintf(info,"f %d:",myrank);
output_free_memory(myrank,info);
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_free_device_memory,
get_FREE_DEVICE_MEMORY)(realw* free, realw* used, realw* total) {
TRACE("get_free_device_memory");
double free_db,used_db,total_db;
get_free_memory(&free_db,&used_db,&total_db);
// converts to MB
*free = (realw) free_db/1024.0/1024.0;
*used = (realw) used_db/1024.0/1024.0;
*total = (realw) total_db/1024.0/1024.0;
return;
}
/* ----------------------------------------------------------------------------------------------- */
// Auxiliary functions
/* ----------------------------------------------------------------------------------------------- */
/*
__global__ void memset_to_realw_kernel(realw* array, int size, realw value){
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
if (i < size){
array[i] = value;
}
}
*/
/* ----------------------------------------------------------------------------------------------- */
realw get_device_array_maximum_value(realw* array, int size){
// gets the maximum of an array on the GPU by copying it over to the CPU and handling it there
realw max = 0.0f;
// checks if anything to do
if (size > 0){
realw* h_array;
// explicitly wait for cuda kernels to finish
// (cudaMemcpy implicitly synchronizes all other cuda operations)
synchronize_cuda();
h_array = (realw*)calloc(size,sizeof(realw));
print_CUDA_error_if_any(cudaMemcpy(h_array,array,sizeof(realw)*size,cudaMemcpyDeviceToHost),33001);
// finds maximum value in array
max = h_array[0];
for( int i=1; i < size; i++){
if (fabs(h_array[i]) > max) max = fabs(h_array[i]);
}
free(h_array);
}
return max;
}
/* ----------------------------------------------------------------------------------------------- */
// ACOUSTIC simulations
/* ----------------------------------------------------------------------------------------------- */
__global__ void get_maximum_kernel(field* array, int size, realw* d_max){
/* simplest version: uses only 1 thread
realw max;
max = 0;
// finds maximum value in array
if (size > 0){
max = abs(array[0]);
for( int i=1; i < size; i++){
if (abs(array[i]) > max) max = abs(array[i]);
}
}
*d_max = max;
*/
// reduction example:
__shared__ realw sdata[BLOCKSIZE_TRANSFER] ;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
// loads absolute values into shared memory
sdata[tid] = (i < size) ? fabs(array[i]) : 0.0 ;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s){
// summation:
//sdata[tid] += sdata[tid + s];
// maximum:
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_max[bx] = sdata[0];
}
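/* ----------------------------------------------------------------------------------------------- */
// Illustrative worked example (added for clarity, not part of the original source): the shared-memory
// reduction above halves the number of active threads each step. For blockDim.x = 8 and absolute
// values sdata = [3, 1, 4, 1, 5, 9, 2, 6], the strides s = 4, 2, 1 produce
//   s=4: [5, 9, 4, 6, ...]   (each tid < 4 keeps max(sdata[tid], sdata[tid+4]))
//   s=2: [5, 9, ...]
//   s=1: [9, ...]
// and thread 0 writes the block maximum sdata[0] = 9 to d_max[bx]; the host then reduces over blocks.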
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_norm_acoustic_from_device,
GET_NORM_ACOUSTIC_FROM_DEVICE)(realw* norm,long* Mesh_pointer,int* sim_type) {
TRACE("get_norm_acoustic_from_device");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw max = 0.0;
realw *d_max;
//initializes
*norm = 0.0f;
/* way 1 : timing Elapsed time: 8.464813e-03
realw* h_array;
h_array = (realw*)calloc(mp->NGLOB_AB,sizeof(realw));
print_CUDA_error_if_any(cudaMemcpy(h_array,mp->d_potential_dot_dot_acoustic,
sizeof(realw)*(mp->NGLOB_AB),cudaMemcpyDeviceToHost),131);
// finds maximum value in array
max = h_array[0];
for( int i=1; i < mp->NGLOB_AB; i++){
if (abs(h_array[i]) > max) max = abs(h_array[i]);
}
free(h_array);
*/
/* way 2: timing Elapsed time: 8.818102e-02
// launch simple kernel
cudaMalloc((void**)&d_max,sizeof(realw));
dim3 grid(1,1);
dim3 threads(1,1,1);
get_maximum_kernel<<<grid,threads>>>(mp->d_potential_dot_dot_acoustic,
mp->NGLOB_AB,
d_max);
print_CUDA_error_if_any(cudaMemcpy(&max,d_max, sizeof(realw), cudaMemcpyDeviceToHost),222);
cudaFree(d_max);
*/
// way 2 b: timing Elapsed time: 1.236916e-03
// launch simple reduction kernel
realw* h_max;
int blocksize = BLOCKSIZE_TRANSFER;
int size = mp->NGLOB_AB;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
//printf("num_blocks_x %i \n",num_blocks_x);
// on host (allocates & initializes to zero)
h_max = (realw*) calloc(num_blocks_x*num_blocks_y,sizeof(realw));
// allocates memory on device
print_CUDA_error_if_any(cudaMalloc((void**)&d_max,num_blocks_x*num_blocks_y*sizeof(realw)),78001);
// initializes values to zero
print_CUDA_error_if_any(cudaMemset(d_max,0,num_blocks_x*num_blocks_y*sizeof(realw)),77002);
if (*sim_type == 1){
get_maximum_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_potential_dot_dot_acoustic,size,d_max);
}else if (*sim_type == 3){
get_maximum_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_potential_dot_dot_acoustic,size,d_max);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("kernel get_maximum_kernel");
#endif
// synchronizes
//synchronize_cuda();
// explicitly waits for stream to finish
// (cudaMemcpy implicitly synchronizes all other cuda operations)
cudaStreamSynchronize(mp->compute_stream);
print_CUDA_error_if_any(cudaMemcpy(h_max,d_max,num_blocks_x*num_blocks_y*sizeof(realw),
cudaMemcpyDeviceToHost),222);
// determines max for all blocks
max = h_max[0];
for(int i=1;i<num_blocks_x*num_blocks_y;i++) {
if (max < h_max[i]) max = h_max[i];
}
cudaFree(d_max);
free(h_max);
/* way 3: doesn't work properly...
cublasStatus status;
// Initialize CUBLAS
status = cublasInit();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS initialization error\n");
exit(1);
}
// cublas function: cublasIsamax
// finds the smallest index of the maximum magnitude element of single
// precision vector x
int incr = 1;
int imax = 0;
imax = cublasIsamax(mp->NGLOB_AB,(realw*)mp->d_potential_dot_dot_acoustic, incr);
status= cublasGetError();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! CUBLAS error in cublasIsamax\n");
exit(1);
}
print_CUDA_error_if_any(cudaMemcpy(&max,&(mp->d_potential_dot_dot_acoustic[imax]),
sizeof(realw), cudaMemcpyDeviceToHost),222);
printf("maximum %i %i %f \n",mp->NGLOB_AB,imax,max);
// Shutdown
status = cublasShutdown();
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf (stderr, "!!!! shutdown error (A)\n");
exit(1);
}
*/
// return result
*norm = max;
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("get_norm_acoustic_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// ELASTIC simulations
/* ----------------------------------------------------------------------------------------------- */
__global__ void get_maximum_vector_kernel(realw* array, int size, realw* d_max){
// reduction example:
__shared__ realw sdata[BLOCKSIZE_TRANSFER] ;
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int bx = blockIdx.y*gridDim.x+blockIdx.x;
unsigned int i = tid + bx*blockDim.x;
// loads squared vector magnitudes into shared memory: array holds interleaved (x,y,z) components per point
sdata[tid] = (i < size) ? (array[i*3]*array[i*3] + array[i*3+1]*array[i*3+1] + array[i*3+2]*array[i*3+2]) : 0.0 ;
__syncthreads();
// do reduction in shared mem
for(unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s){
// summation:
//sdata[tid] += sdata[tid + s];
// maximum:
if (sdata[tid] < sdata[tid + s]) sdata[tid] = sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) d_max[bx] = sdata[0];
}
/* ----------------------------------------------------------------------------------------------- */
extern "C"
void FC_FUNC_(get_norm_elastic_from_device,
GET_NORM_ELASTIC_FROM_DEVICE)(realw* norm,
long* Mesh_pointer,
int* type) {
TRACE("\tget_norm_elastic_from_device");
//double start_time = get_time();
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw max,res;
realw *d_max;
//initializes
*norm = 0.0f;
// launch simple reduction kernel
realw* h_max;
int blocksize = BLOCKSIZE_TRANSFER;
int size = mp->NGLOB_AB;
int size_padded = ((int)ceil(((double)size)/((double)blocksize)))*blocksize;
int num_blocks_x, num_blocks_y;
get_blocks_xy(size_padded/blocksize,&num_blocks_x,&num_blocks_y);
dim3 grid(num_blocks_x,num_blocks_y);
dim3 threads(blocksize,1,1);
// on host (allocates & initializes to zero)
h_max = (realw*) calloc(num_blocks_x*num_blocks_y,sizeof(realw));
// allocates memory on device
print_CUDA_error_if_any(cudaMalloc((void**)&d_max,num_blocks_x*num_blocks_y*sizeof(realw)),77001);
// initializes values to zero
print_CUDA_error_if_any(cudaMemset(d_max,0,num_blocks_x*num_blocks_y*sizeof(realw)),77002);
if (*type == 1){
get_maximum_vector_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_displ,size,d_max);
}else if (*type == 3){
get_maximum_vector_kernel<<<grid,threads,0,mp->compute_stream>>>(mp->d_b_displ,size,d_max);
}
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("kernel get_norm_elastic_from_device");
#endif
// synchronizes
//synchronize_cuda();
// explicitly waits for stream to finish
// (cudaMemcpy implicitly synchronizes all other cuda operations)
cudaStreamSynchronize(mp->compute_stream);
// copies reduction array back to CPU
print_CUDA_error_if_any(cudaMemcpy(h_max,d_max,num_blocks_x*num_blocks_y*sizeof(realw),
cudaMemcpyDeviceToHost),222);
// determines max for all blocks
max = h_max[0];
for(int i=1;i<num_blocks_x*num_blocks_y;i++) {
if (max < h_max[i]) max = h_max[i];
}
res = sqrt(max);
// return result
*norm = res;
// debug
//printf("rank % d - type: %d norm: %f \n",mp->myrank,*type,res);
cudaFree(d_max);
free(h_max);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
//double end_time = get_time();
//printf("Elapsed time: %e\n",end_time-start_time);
exit_on_cuda_error("get_norm_elastic_from_device");
#endif
}
/* ----------------------------------------------------------------------------------------------- */
// unused ...
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(get_max_accel,
GET_MAX_ACCEL)(int* itf,int* sizef,long* Mesh_pointer) {
TRACE("get_max_accel");
Mesh* mp = (Mesh*)(*Mesh_pointer);
int procid;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD,&procid);
#else
procid = 0;
#endif
int size = *sizef;
int it = *itf;
realw* accel_cpy = (realw*)malloc(size*sizeof(realw));
cudaMemcpy(accel_cpy,mp->d_accel,size*sizeof(realw),cudaMemcpyDeviceToHost);
realw maxval=0;
for(int i=0;i<size;++i) {
maxval = MAX(maxval,accel_cpy[i]);
}
printf("%d/%d: max=%e\n",it,procid,maxval);
free(accel_cpy);
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_phase_ispec_kernel(int num_phase_ispec,
int* phase_ispec,
int NSPEC_AB,
int* ier) {
int i,ispec,iphase,count0,count1;
*ier = 0;
for(iphase=0; iphase < 2; iphase++){
count0 = 0;
count1 = 0;
for(i=0; i < num_phase_ispec; i++){
ispec = phase_ispec[iphase*num_phase_ispec + i] - 1;
if (ispec < -1 || ispec >= NSPEC_AB){
printf("Error in d_phase_ispec_inner_elastic %d %d\n",i,ispec);
*ier = 1;
return;
}
if (ispec >= 0){ count0++;}
if (ispec < 0){ count1++;}
}
printf("check_phase_ispec done: phase %d, count = %d %d \n",iphase,count0,count1);
}
}
void check_phase_ispec(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check phase_ispec for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
cudaMalloc((void**)&d_debug,sizeof(int));
if (type == 1){
check_phase_ispec_kernel<<<grid,threads>>>(mp->num_phase_ispec_elastic,
mp->d_phase_ispec_inner_elastic,
mp->NSPEC_AB,
d_debug);
}else if (type == 2){
check_phase_ispec_kernel<<<grid,threads>>>(mp->num_phase_ispec_acoustic,
mp->d_phase_ispec_inner_acoustic,
mp->NSPEC_AB,
d_debug);
}
cudaMemcpy(h_debug,d_debug,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_phase_ispec");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_ispec_is_kernel(int NSPEC_AB,
int* ispec_is,
int* ier) {
int ispec,count0,count1;
*ier = 0;
count0 = 0;
count1 = 0;
for(ispec=0; ispec < NSPEC_AB; ispec++){
if (ispec_is[ispec] < -1 || ispec_is[ispec] > 1){
printf("Error in ispec_is %d %d\n",ispec,ispec_is[ispec]);
*ier = 1;
return;
//exit(1);
}
if (ispec_is[ispec] == 0){count0++;}
if (ispec_is[ispec] != 0){count1++;}
}
printf("check_ispec_is done: count = %d %d\n",count0,count1);
}
void check_ispec_is(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check ispec_is for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
cudaMalloc((void**)&d_debug,sizeof(int));
if (type == 0){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_inner,
d_debug);
}else if (type == 1){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_elastic,
d_debug);
}else if (type == 2){
check_ispec_is_kernel<<<grid,threads>>>(mp->NSPEC_AB,
mp->d_ispec_is_acoustic,
d_debug);
}
cudaMemcpy(h_debug,d_debug,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_ispec_is");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
//daniel: helper function
/*
__global__ void check_array_ispec_kernel(int num_array_ispec,
int* array_ispec,
int NSPEC_AB,
int* ier) {
int i,ispec,count0,count1;
*ier = 0;
count0 = 0;
count1 = 0;
for(i=0; i < num_array_ispec; i++){
ispec = array_ispec[i] - 1;
if (ispec < -1 || ispec >= NSPEC_AB){
printf("Error in d_array_ispec %d %d\n",i,ispec);
*ier = 1;
return;
}
if (ispec >= 0){ count0++;}
if (ispec < 0){ count1++;}
}
printf("check_array_ispec done: count = %d %d \n",count0,count1);
}
void check_array_ispec(long* Mesh_pointer,int type){
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
printf("check array_ispec for type=%d\n",type);
dim3 grid(1,1);
dim3 threads(1,1,1);
int* h_debug = (int*) calloc(1,sizeof(int));
int* d_debug;
cudaMalloc((void**)&d_debug,sizeof(int));
if (type == 1){
check_array_ispec_kernel<<<grid,threads>>>(mp->d_num_abs_boundary_faces,
mp->d_abs_boundary_ispec,
mp->NSPEC_AB,
d_debug);
}
cudaMemcpy(h_debug,d_debug,1*sizeof(int),cudaMemcpyDeviceToHost);
cudaFree(d_debug);
if (*h_debug != 0){printf("error for type=%d\n",type); exit(1);}
free(h_debug);
fflush(stdout);
#ifdef ENABLE_VERY_SLOW_ERROR_CHECKING
exit_on_cuda_error("check_array_ispec");
#endif
}
*/
/* ----------------------------------------------------------------------------------------------- */
// Check functions
/* ----------------------------------------------------------------------------------------------- */
//max: helper functions
/*
extern "C"
void FC_FUNC_(check_max_norm_displ_gpu,
CHECK_MAX_NORM_DISPL_GPU)(int* size, realw* displ,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_displ_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
cudaMemcpy(displ, mp->d_displ,*size*sizeof(realw),cudaMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(displ[i]));
}
printf("%d: maxnorm of forward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_vector,
CHECK_MAX_NORM_VECTOR)(int* size, realw* vector1, int* announceID) {
TRACE("check_max_norm_vector");
int procid;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD,&procid);
#else
procid = 0;
#endif
realw maxnorm=0;
int maxloc;
for(int i=0;i<*size;i++) {
if (maxnorm<fabsf(vector1[i])) {
maxnorm = vector1[i];
maxloc = i;
}
}
printf("%d:maxnorm of vector %d [%d] = %e\n",procid,*announceID,maxloc,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_displ,
CHECK_MAX_NORM_DISPL)(int* size, realw* displ, int* announceID) {
TRACE("check_max_norm_displ");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(displ[i]));
}
printf("%d: maxnorm of forward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_displ_gpu,
CHECK_MAX_NORM_B_DISPL_GPU)(int* size, realw* b_displ,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_displ_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
realw* b_accel = (realw*)malloc(*size*sizeof(realw));
cudaMemcpy(b_displ, mp->d_b_displ,*size*sizeof(realw),cudaMemcpyDeviceToHost);
cudaMemcpy(b_accel, mp->d_b_accel,*size*sizeof(realw),cudaMemcpyDeviceToHost);
realw maxnorm=0;
realw maxnorm_accel=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_displ[i]));
maxnorm_accel = MAX(maxnorm,fabsf(b_accel[i]));
}
free(b_accel);
printf("%d: maxnorm of backward displ = %e\n",*announceID,maxnorm);
printf("%d: maxnorm of backward accel = %e\n",*announceID,maxnorm_accel);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_accel_gpu,
CHECK_MAX_NORM_B_ACCEL_GPU)(int* size, realw* b_accel,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_accel_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
cudaMemcpy(b_accel, mp->d_b_accel,*size*sizeof(realw),cudaMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_accel[i]));
}
printf("%d: maxnorm of backward accel = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_veloc_gpu,
CHECK_MAX_NORM_B_VELOC_GPU)(int* size, realw* b_veloc,long* Mesh_pointer,int* announceID) {
TRACE("check_max_norm_b_veloc_gpu");
Mesh* mp = (Mesh*)(*Mesh_pointer); //get mesh pointer out of fortran integer container
cudaMemcpy(b_veloc, mp->d_b_veloc,*size*sizeof(realw),cudaMemcpyDeviceToHost);
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_veloc[i]));
}
printf("%d: maxnorm of backward veloc = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_displ,
CHECK_MAX_NORM_B_DISPL)(int* size, realw* b_displ,int* announceID) {
TRACE("check_max_norm_b_displ");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_displ[i]));
}
printf("%d:maxnorm of backward displ = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_max_norm_b_accel,
CHECK_MAX_NORM_B_ACCEL)(int* size, realw* b_accel,int* announceID) {
TRACE("check_max_norm_b_accel");
realw maxnorm=0;
for(int i=0;i<*size;i++) {
maxnorm = MAX(maxnorm,fabsf(b_accel[i]));
}
printf("%d:maxnorm of backward accel = %e\n",*announceID,maxnorm);
}
*/
/* ----------------------------------------------------------------------------------------------- */
/*
extern "C"
void FC_FUNC_(check_error_vectors,
CHECK_ERROR_VECTORS)(int* sizef, realw* vector1,realw* vector2) {
TRACE("check_error_vectors");
int size = *sizef;
double diff2 = 0;
double sum = 0;
double temp;
double maxerr=0;
int maxerrorloc;
for(int i=0;i<size;++i) {
temp = vector1[i]-vector2[i];
diff2 += temp*temp;
sum += vector1[i]*vector1[i];
if (maxerr < fabsf(temp)) {
maxerr = abs(temp);
maxerrorloc = i;
}
}
printf("rel error = %f, maxerr = %e @ %d\n",diff2/sum,maxerr,maxerrorloc);
int myrank;
#ifdef WITH_MPI
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
myrank = 0;
#endif
if (myrank == 0) {
for(int i=maxerrorloc;i>maxerrorloc-5;i--) {
printf("[%d]: %e vs. %e\n",i,vector1[i],vector2[i]);
}
}
}
*/
59e55ad02f1c82651c39469e6d1cebaf46addbd4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#pragma comment(lib, "cuda.lib")
#pragma comment(lib, "cudart.lib")
#include <hip/hip_runtime.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <rocblas.h>
using namespace std;
#define blocksize 8
/*storing matrix*/
void matrix_read(double *L, int dimension){
FILE *fp;
int row, col;
fp = fopen("randomMatrix_1000.txt", "r");//open input file
if (fp == NULL)//open failed
return;
for (row = 0; row < dimension; row++){
for (col = 0; col < dimension; col++)
if (fscanf(fp, "%f,", &L[row * dimension + col]) == EOF) break;//read data
if (feof(fp)) break;//if the file is over
}
fclose(fp);//close file
}
__global__ void nodiag_normalize(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n)
if (x == i && x!=y){
I[x*n + y] /= A[i*n + i];
A[x*n + y] /= A[i*n + i];
}
}
__global__ void diag_normalize(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n)
if (x == y && x == i){
I[x*n + y] /= A[i*n + i];
A[x*n + y] /= A[i*n + i];
}
}
__global__ void gaussjordan(double *A, double *I, int n, int i)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n){
if (x != i){
I[x*n + y] -= I[i*n + y] * A[x*n + i];
if (y != i){
A[x*n + y] -= A[i*n + y] * A[x*n + i];
}
}
}
}
__global__ void set_zero(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n){
if (x != i){
if (y == i){
A[x*n + y] = 0;
}
}
}
}
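/*
 Illustrative note (added for clarity, not part of the original source): each iteration of the
 inversion loop in main() applies one Gauss-Jordan pivot step to the augmented system [A | I]
 using the four kernels above, in this order:
   1. nodiag_normalize / diag_normalize : divide row i of A and I by the pivot A[i][i]
   2. gaussjordan                       : subtract A[x][i] * (row i) from every other row x
                                          (for A, column i itself is left to set_zero)
   3. set_zero                          : explicitly zero out column i in the other rows
 Worked 2x2 example, pivot i = 0:
   [ 2  1 | 1  0 ]  normalize row 0  [ 1  0.5 | 0.5  0 ]  eliminate row 1  [ 1  0.5 | 0.5  0 ]
   [ 4  3 | 0  1 ]  -------------->  [ 4  3   | 0    1 ]  -------------->  [ 0  1   | -2   1 ]
 After the second pivot (i = 1) the right-hand block holds A^(-1) = [ 1.5 -0.5 ; -2 1 ].
*/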
void savetofile(double *A, string s, int n, int h)
{
std::ofstream plik;
plik.open(s);
for (int j = 0; j<h; j++){
for (int i = 0; i<h; i++){
plik << A[j*n + i] << "\t";
}
plik << endl;
}
plik.close();
}
int main()
{
const int n = 1000;
// creating input
double *iL = new double[n*n];
double *L = new double[n*n];
matrix_read(L, n);
//savetofile(L, "L.txt", n, n);
cout << "inv\n";
double *d_A, *d_L, *I, *dI;
float time;
hipError_t err;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int ddsize = n*n*sizeof(double);
dim3 threadsPerBlock(blocksize, blocksize);
dim3 numBlocks((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
// memory allocation
err = hipMalloc((void**)&d_A, ddsize);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = hipMalloc((void**)&dI, ddsize);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
I = new double[n*n];
for (int i = 0; i<n; i++){
for (int j = 0; j<n; j++){
if (i == j) I[i*n + i] = 1.0;
else I[i*n + j] = 0.0;
}
}
//copy data from CPU to GPU
err = hipMemcpy(d_A, L, ddsize, hipMemcpyHostToDevice);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = hipMemcpy(dI, I, ddsize, hipMemcpyHostToDevice);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
//timer start
hipEventRecord(start, 0);
// L^(-1)
for (int i = 0; i<n; i++){
nodiag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
diag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
gaussjordan<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
set_zero<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
//copy data from GPU to CPU
err = hipMemcpy(iL, dI, ddsize, hipMemcpyDeviceToHost);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = hipMemcpy(I, d_A, ddsize, hipMemcpyDeviceToHost);
if (err != hipSuccess){ cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
cout << "Cuda Time - inverse: " << time << "ms\n";
savetofile(iL, "inv.txt", n, n);
//savetofile(I, "I.txt", n, n);
hipFree(d_A);
hipFree(dI);
double *c = new double[n*n];
for (int i = 0; i<n; i++)
for (int j = 0; j<n; j++)
{
c[i*n+j] = 0; //put the initial value to zero
for (int x = 0; x<n; x++)
c[i*n + j] = c[i*n + j] + L[i*n+x] * iL[x*n + j]; //matrix multiplication
}
savetofile(c, "c.txt", n, n);
delete[]I;
delete[]L;
delete[]iL;
delete[]c;
system("Pause");
return 0;
}
59e55ad02f1c82651c39469e6d1cebaf46addbd4.cu
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#pragma comment(lib, "cuda.lib")
#pragma comment(lib, "cudart.lib")
#include <cuda.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <cublas_v2.h>
using namespace std;
#define blocksize 8
/*storing matrix*/
void matrix_read(double *L, int dimension){
FILE *fp;
int row, col;
fp = fopen("randomMatrix_1000.txt", "r");//open input file
if (fp == NULL)//open failed
return;
for (row = 0; row < dimension; row++){
for (col = 0; col < dimension; col++)
if (fscanf(fp, "%f,", &L[row * dimension + col]) == EOF) break;//read data
if (feof(fp)) break;//if the file is over
}
fclose(fp);//close file
}
__global__ void nodiag_normalize(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n)
if (x == i && x!=y){
I[x*n + y] /= A[i*n + i];
A[x*n + y] /= A[i*n + i];
}
}
__global__ void diag_normalize(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n)
if (x == y && x == i){
I[x*n + y] /= A[i*n + i];
A[x*n + y] /= A[i*n + i];
}
}
__global__ void gaussjordan(double *A, double *I, int n, int i)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n){
if (x != i){
I[x*n + y] -= I[i*n + y] * A[x*n + i];
if (y != i){
A[x*n + y] -= A[i*n + y] * A[x*n + i];
}
}
}
}
__global__ void set_zero(double *A, double *I, int n, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < n && y < n){
if (x != i){
if (y == i){
A[x*n + y] = 0;
}
}
}
}
void savetofile(double *A, string s, int n, int h)
{
std::ofstream plik;
plik.open(s);
for (int j = 0; j<h; j++){
for (int i = 0; i<h; i++){
plik << A[j*n + i] << "\t";
}
plik << endl;
}
plik.close();
}
int main()
{
const int n = 1000;
// creating input
double *iL = new double[n*n];
double *L = new double[n*n];
matrix_read(L, n);
//savetofile(L, "L.txt", n, n);
cout << "inv\n";
double *d_A, *d_L, *I, *dI;
float time;
cudaError_t err;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int ddsize = n*n*sizeof(double);
dim3 threadsPerBlock(blocksize, blocksize);
dim3 numBlocks((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
// memory allocation
err = cudaMalloc((void**)&d_A, ddsize);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = cudaMalloc((void**)&dI, ddsize);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
I = new double[n*n];
for (int i = 0; i<n; i++){
for (int j = 0; j<n; j++){
if (i == j) I[i*n + i] = 1.0;
else I[i*n + j] = 0.0;
}
}
//copy data from CPU to GPU
err = cudaMemcpy(d_A, L, ddsize, cudaMemcpyHostToDevice);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = cudaMemcpy(dI, I, ddsize, cudaMemcpyHostToDevice);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
//timer start
cudaEventRecord(start, 0);
// L^(-1)
for (int i = 0; i<n; i++){
nodiag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
diag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
gaussjordan<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
set_zero<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy data from GPU to CPU
err = cudaMemcpy(iL, dI, ddsize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
err = cudaMemcpy(I, d_A, ddsize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){ cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
cout << "Cuda Time - inverse: " << time << "ms\n";
savetofile(iL, "inv.txt", n, n);
//savetofile(I, "I.txt", n, n);
cudaFree(d_A);
cudaFree(dI);
double *c = new double[n*n];
for (int i = 0; i<n; i++)
for (int j = 0; j<n; j++)
{
c[i*n+j] = 0; //put the initial value to zero
for (int x = 0; x<n; x++)
c[i*n + j] = c[i*n + j] + L[i*n+x] * iL[x*n + j]; //matrix multiplication
}
savetofile(c, "c.txt", n, n);
delete[]I;
delete[]L;
delete[]iL;
delete[]c;
system("Pause");
return 0;
} |